from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch('socket.socket' )
@patch('builtins.open' )
def test_send_file_running_as_expected(file, sock):
    # ===== initialization =====
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    f = iter([1, None])
    file.return_value.__enter__.return_value.read.side_effect = lambda _: next(f)
    # ===== invoke =====
    send_file(filename="mytext.txt", testing=True)
    # ===== verification =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()
    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()
    conn.send.assert_called_once()
    conn.close.assert_called_once()
    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
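# A minimal, self-contained sketch of the same unittest.mock pattern used above
# (stub a dependency, chain return_value attributes, then assert on the recorded
# calls). The fetch_greeting function and the "/greeting" path are made up purely
# for illustration and are not part of the dataset content above.
from unittest.mock import Mock


def fetch_greeting(client) -> str:
    # toy function under test: it calls a collaborator and returns its data
    return client.get("/greeting").text


def test_fetch_greeting() -> None:
    client = Mock()
    client.get.return_value.text = "hello"  # stub the chained attribute access
    assert fetch_greeting(client) == "hello"
    client.get.assert_called_once_with("/greeting")  # verify the interaction


test_fetch_greeting()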
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    """simple docstring"""
    def _no_encoding_on_file_open(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match
    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None
    def test_no_encoding_on_all_module_files(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")
    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
import functools
import logging
import os
import sys
import threading
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
import huggingface_hub.utils as hf_hub_utils
from tqdm import auto as tqdm_lib
__magic_name__ = threading.Lock()
__magic_name__ = None
__magic_name__ = {
'''debug''': logging.DEBUG,
'''info''': logging.INFO,
'''warning''': logging.WARNING,
'''error''': logging.ERROR,
'''critical''': logging.CRITICAL,
}
__magic_name__ = logging.WARNING
__magic_name__ = True
def SCREAMING_SNAKE_CASE__ ( ):
snake_case__ = os.getenv("TRANSFORMERS_VERBOSITY" , __lowerCAmelCase )
if env_level_str:
if env_level_str in log_levels:
return log_levels[env_level_str]
else:
logging.getLogger().warning(
F"""Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, """
F"""has to be one of: { ", ".join(log_levels.keys() ) }""" )
return _default_log_level
def SCREAMING_SNAKE_CASE__ ( ):
return __name__.split("." )[0]
def SCREAMING_SNAKE_CASE__ ( ):
return logging.getLogger(_get_library_name() )
def SCREAMING_SNAKE_CASE__ ( ):
global _default_handler
with _lock:
if _default_handler:
# This library has already configured the library root logger.
return
snake_case__ = logging.StreamHandler() # Set sys.stderr as stream.
snake_case__ = sys.stderr.flush
# Apply our default configuration to the library root logger.
snake_case__ = _get_library_root_logger()
library_root_logger.addHandler(_default_handler )
library_root_logger.setLevel(_get_default_logging_level() )
snake_case__ = False
def SCREAMING_SNAKE_CASE__ ( ):
global _default_handler
with _lock:
if not _default_handler:
return
snake_case__ = _get_library_root_logger()
library_root_logger.removeHandler(_default_handler )
library_root_logger.setLevel(logging.NOTSET )
snake_case__ = None
def SCREAMING_SNAKE_CASE__ ( ):
return log_levels
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase = None ):
if name is None:
snake_case__ = _get_library_name()
_configure_library_root_logger()
return logging.getLogger(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( ):
_configure_library_root_logger()
return _get_library_root_logger().getEffectiveLevel()
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ):
_configure_library_root_logger()
_get_library_root_logger().setLevel(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( ):
return set_verbosity(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( ):
return set_verbosity(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( ):
return set_verbosity(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( ):
return set_verbosity(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( ):
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().removeHandler(_default_handler )
def SCREAMING_SNAKE_CASE__ ( ):
_configure_library_root_logger()
assert _default_handler is not None
_get_library_root_logger().addHandler(_default_handler )
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ):
_configure_library_root_logger()
assert handler is not None
_get_library_root_logger().addHandler(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ):
_configure_library_root_logger()
assert handler is not None and handler not in _get_library_root_logger().handlers
_get_library_root_logger().removeHandler(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( ):
_configure_library_root_logger()
snake_case__ = False
def SCREAMING_SNAKE_CASE__ ( ):
_configure_library_root_logger()
snake_case__ = True
def SCREAMING_SNAKE_CASE__ ( ):
snake_case__ = _get_library_root_logger().handlers
for handler in handlers:
snake_case__ = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s" )
handler.setFormatter(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( ):
snake_case__ = _get_library_root_logger().handlers
for handler in handlers:
handler.setFormatter(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
snake_case__ = os.getenv("TRANSFORMERS_NO_ADVISORY_WARNINGS" , __lowerCAmelCase )
if no_advisory_warnings:
return
self.warning(*__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ = warning_advice
@functools.lru_cache(__lowerCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self , *__lowerCAmelCase , **__lowerCAmelCase ):
self.warning(*__lowerCAmelCase , **__lowerCAmelCase )
__magic_name__ = warning_once
class _SCREAMING_SNAKE_CASE :
def __init__( self , *lowerCamelCase , **lowerCamelCase ): # pylint: disable=unused-argument
snake_case__ = args[0] if args else None
def __iter__( self ):
return iter(self._iterator )
def __getattr__( self , lowerCamelCase ):
def empty_fn(*lowerCamelCase , **lowerCamelCase ): # pylint: disable=unused-argument
return
return empty_fn
def __enter__( self ):
return self
def __exit__( self , lowerCamelCase , lowerCamelCase , lowerCamelCase ):
return
class _SCREAMING_SNAKE_CASE :
def __call__( self , *lowerCamelCase , **lowerCamelCase ):
if _tqdm_active:
return tqdm_lib.tqdm(*lowerCamelCase , **lowerCamelCase )
else:
return EmptyTqdm(*lowerCamelCase , **lowerCamelCase )
def A_ ( self , *lowerCamelCase , **lowerCamelCase ):
snake_case__ = None
if _tqdm_active:
return tqdm_lib.tqdm.set_lock(*lowerCamelCase , **lowerCamelCase )
def A_ ( self ):
if _tqdm_active:
return tqdm_lib.tqdm.get_lock()
__magic_name__ = _tqdm_cls()
def SCREAMING_SNAKE_CASE__ ( ):
global _tqdm_active
return bool(_tqdm_active )
def SCREAMING_SNAKE_CASE__ ( ):
global _tqdm_active
snake_case__ = True
hf_hub_utils.enable_progress_bars()
def SCREAMING_SNAKE_CASE__ ( ):
global _tqdm_active
snake_case__ = False
hf_hub_utils.disable_progress_bars()
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
_A : Any = (KDPMaDiscreteScheduler,)
_A : Dict = 10
def A_ ( self , **lowerCamelCase ):
snake_case__ = {
"num_train_timesteps": 11_00,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
}
config.update(**lowerCamelCase )
return config
def A_ ( self ):
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase )
def A_ ( self ):
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=lowerCamelCase , beta_end=lowerCamelCase )
def A_ ( self ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowerCamelCase )
def A_ ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase )
def A_ ( self ):
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config(prediction_type="v_prediction" )
snake_case__ = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
snake_case__ = self.dummy_model()
snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case__ = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ = model(lowerCamelCase , lowerCamelCase )
snake_case__ = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase )
snake_case__ = output.prev_sample
snake_case__ = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ = torch.mean(torch.abs(lowerCamelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.69_34e-07 ) < 1e-2
assert abs(result_mean.item() - 6.11_12e-10 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_93_42_86_50_17_09_72e-07 ) < 1e-2
assert abs(result_mean.item() - 0.0_0_0_2 ) < 1e-3
def A_ ( self ):
if torch_device == "mps":
return
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
snake_case__ = self.dummy_model()
snake_case__ = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case__ = sample.to(lowerCamelCase )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ = model(lowerCamelCase , lowerCamelCase )
snake_case__ = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase )
snake_case__ = output.prev_sample
snake_case__ = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ = torch.mean(torch.abs(lowerCamelCase ) )
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
def A_ ( self ):
if torch_device == "mps":
return
snake_case__ = self.scheduler_classes[0]
snake_case__ = self.get_scheduler_config()
snake_case__ = scheduler_class(**lowerCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=lowerCamelCase )
snake_case__ = self.dummy_model()
snake_case__ = self.dummy_sample_deter.to(lowerCamelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
snake_case__ = scheduler.scale_model_input(lowerCamelCase , lowerCamelCase )
snake_case__ = model(lowerCamelCase , lowerCamelCase )
snake_case__ = scheduler.step(lowerCamelCase , lowerCamelCase , lowerCamelCase )
snake_case__ = output.prev_sample
snake_case__ = torch.sum(torch.abs(lowerCamelCase ) )
snake_case__ = torch.mean(torch.abs(lowerCamelCase ) )
if str(lowerCamelCase ).startswith("cpu" ):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 2_0.4_1_2_5 ) < 1e-2
assert abs(result_mean.item() - 0.0_2_6_6 ) < 1e-3
'''simple docstring'''
__UpperCAmelCase = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
__UpperCAmelCase = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__UpperCAmelCase = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""",
"""xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""",
"""xlm-roberta-large-finetuned-conll02-dutch""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll02-spanish""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-english""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"""
),
"""xlm-roberta-large-finetuned-conll03-german""": (
"""https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"""
),
}
class UpperCAmelCase ( __A ):
'''simple docstring'''
lowerCamelCase_ = '''xlm-roberta'''
def __init__( self , lowercase=3_0_5_2_2 , lowercase=7_6_8 , lowercase=1_2 , lowercase=1_2 , lowercase=3_0_7_2 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=5_1_2 , lowercase=2 , lowercase=0.02 , lowercase=1E-12 , lowercase=1 , lowercase=0 , lowercase=2 , lowercase="absolute" , lowercase=True , lowercase=None , **lowercase , ):
"""simple docstring"""
super().__init__(pad_token_id=lowercase , bos_token_id=lowercase , eos_token_id=lowercase , **lowercase )
A_ : Union[str, Any] = vocab_size
A_ : int = hidden_size
A_ : Dict = num_hidden_layers
A_ : List[str] = num_attention_heads
A_ : Tuple = hidden_act
A_ : Optional[int] = intermediate_size
A_ : Any = hidden_dropout_prob
A_ : Tuple = attention_probs_dropout_prob
A_ : int = max_position_embeddings
A_ : Union[str, Any] = type_vocab_size
A_ : List[str] = initializer_range
A_ : Tuple = layer_norm_eps
A_ : str = position_embedding_type
A_ : Any = use_cache
A_ : str = classifier_dropout
class UpperCAmelCase ( __A ):
'''simple docstring'''
@property
def lowerCAmelCase_ ( self ):
"""simple docstring"""
if self.task == "multiple-choice":
A_ : Union[str, Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A_ : str = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase ( snake_case_ ):
SCREAMING_SNAKE_CASE__ = '''ClapFeatureExtractor'''
SCREAMING_SNAKE_CASE__ = ('''RobertaTokenizer''', '''RobertaTokenizerFast''')
def __init__( self , _lowerCAmelCase , _lowerCAmelCase ):
super().__init__(_lowerCAmelCase , _lowerCAmelCase )
def __call__( self , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=None , **_lowerCAmelCase ):
_lowerCAmelCase = kwargs.pop('''sampling_rate''' , _lowerCAmelCase )
if text is None and audios is None:
raise ValueError('''You have to specify either text or audios. Both cannot be none.''' )
if text is not None:
_lowerCAmelCase = self.tokenizer(_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
if audios is not None:
_lowerCAmelCase = self.feature_extractor(
_lowerCAmelCase , sampling_rate=_lowerCAmelCase , return_tensors=_lowerCAmelCase , **_lowerCAmelCase )
if text is not None and audios is not None:
_lowerCAmelCase = audio_features.input_features
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_lowerCAmelCase ) , tensor_type=_lowerCAmelCase )
def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.batch_decode(*_lowerCAmelCase , **_lowerCAmelCase )
def __lowerCAmelCase ( self , *_lowerCAmelCase , **_lowerCAmelCase ):
return self.tokenizer.decode(*_lowerCAmelCase , **_lowerCAmelCase )
@property
def __lowerCAmelCase ( self ):
_lowerCAmelCase = self.tokenizer.model_input_names
_lowerCAmelCase = self.feature_extractor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + feature_extractor_input_names ) )
def compute_ap(l):  # noqa: E741
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n
    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at
        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])
                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count
    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            is_art[i] = out_edge_count > 1
    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
UpperCAmelCase_ = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
def lowercase ( _a=None ,_a=None ) -> List[Any]:
return field(default_factory=lambda: default ,metadata=_a )
@dataclass
class UpperCAmelCase__ :
snake_case_ = list_field(
default=[] , metadata={
'''help''': (
'''Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version'''
''' of all available models'''
)
} , )
snake_case_ = list_field(
default=[8] , metadata={'''help''': '''List of batch sizes for which memory and time performance will be evaluated'''} )
snake_case_ = list_field(
default=[8, 32, 128, 512] , metadata={'''help''': '''List of sequence lengths for which memory and time performance will be evaluated'''} , )
snake_case_ = field(
default=snake_case__ , metadata={'''help''': '''Whether to benchmark inference of model. Inference can be disabled via --no-inference.'''} , )
snake_case_ = field(
default=snake_case__ , metadata={'''help''': '''Whether to run on available cuda devices. Cuda can be disabled via --no-cuda.'''} , )
snake_case_ = field(
default=snake_case__ , metadata={'''help''': '''Whether to run on available tpu devices. TPU can be disabled via --no-tpu.'''} )
snake_case_ = field(default=snake_case__ , metadata={'''help''': '''Use FP16 to accelerate inference.'''} )
snake_case_ = field(default=snake_case__ , metadata={'''help''': '''Benchmark training of model'''} )
snake_case_ = field(default=snake_case__ , metadata={'''help''': '''Verbose memory tracing'''} )
snake_case_ = field(
default=snake_case__ , metadata={'''help''': '''Whether to perform speed measurements. Speed measurements can be disabled via --no-speed.'''} , )
snake_case_ = field(
default=snake_case__ , metadata={
'''help''': '''Whether to perform memory measurements. Memory measurements can be disabled via --no-memory'''
} , )
snake_case_ = field(default=snake_case__ , metadata={'''help''': '''Trace memory line by line'''} )
snake_case_ = field(default=snake_case__ , metadata={'''help''': '''Save result to a CSV file'''} )
snake_case_ = field(default=snake_case__ , metadata={'''help''': '''Save all print statements in a log file'''} )
snake_case_ = field(default=snake_case__ , metadata={'''help''': '''Whether to print environment information'''} )
snake_case_ = field(
default=snake_case__ , metadata={
'''help''': (
'''Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use'''
''' multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled'''
''' for debugging / testing and on TPU.'''
)
} , )
snake_case_ = field(
default=F'inference_time_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving time results to csv.'''} , )
snake_case_ = field(
default=F'inference_memory_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving memory results to csv.'''} , )
snake_case_ = field(
default=F'train_time_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving time results to csv for training.'''} , )
snake_case_ = field(
default=F'train_memory_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving memory results to csv for training.'''} , )
snake_case_ = field(
default=F'env_info_{round(time() )}.csv' , metadata={'''help''': '''CSV filename used if saving environment information.'''} , )
snake_case_ = field(
default=F'log_{round(time() )}.csv' , metadata={'''help''': '''Log filename used if print statements are saved in log.'''} , )
snake_case_ = field(default=3 , metadata={'''help''': '''Times an experiment will be run.'''} )
snake_case_ = field(
default=snake_case__ , metadata={
'''help''': (
'''Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain'''
''' model weights.'''
)
} , )
def snake_case_ ( self ):
"""simple docstring"""
warnings.warn(
F"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
" are deprecated in general and it is advised to use external Benchmarking libraries "
" to benchmark Transformer models." , A__ , )
def snake_case_ ( self ):
"""simple docstring"""
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def snake_case_ ( self ):
"""simple docstring"""
if len(self.models ) <= 0:
raise ValueError(
"Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
" bert-base-cased` or `args.models = ['bert-base-cased']." )
return self.models
@property
def snake_case_ ( self ):
"""simple docstring"""
if not self.multi_process:
return False
elif self.is_tpu:
logger.info("Multiprocessing is currently not possible on TPU." )
return False
else:
return True
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""google/bit-50""": """https://huggingface.co/google/bit-50/resolve/main/config.json""",
}
class UpperCAmelCase__ ( snake_case__ , snake_case__ ):
snake_case_ = '''bit'''
snake_case_ = ['''preactivation''', '''bottleneck''']
snake_case_ = ['''SAME''', '''VALID''']
def __init__( self , A__=3 , A__=64 , A__=[256, 512, 1024, 2048] , A__=[3, 4, 6, 3] , A__="preactivation" , A__="relu" , A__=None , A__=32 , A__=0.0 , A__=False , A__=32 , A__=1 , A__=None , A__=None , **A__ , ):
"""simple docstring"""
super().__init__(**A__ )
if layer_type not in self.layer_types:
raise ValueError(F"layer_type={layer_type} is not one of {','.join(self.layer_types )}" )
if global_padding is not None:
if global_padding.upper() in self.supported_padding:
UpperCAmelCase_: str = global_padding.upper()
else:
raise ValueError(F"Padding strategy {global_padding} not supported" )
UpperCAmelCase_: List[Any] = num_channels
UpperCAmelCase_: List[Any] = embedding_size
UpperCAmelCase_: Union[str, Any] = hidden_sizes
UpperCAmelCase_: int = depths
UpperCAmelCase_: Tuple = layer_type
UpperCAmelCase_: str = hidden_act
UpperCAmelCase_: str = global_padding
UpperCAmelCase_: Dict = num_groups
UpperCAmelCase_: Dict = drop_path_rate
UpperCAmelCase_: Tuple = embedding_dynamic_padding
UpperCAmelCase_: Tuple = output_stride
UpperCAmelCase_: Dict = width_factor
UpperCAmelCase_: str = ["stem"] + [F"stage{idx}" for idx in range(1 , len(A__ ) + 1 )]
UpperCAmelCase_ , UpperCAmelCase_: Tuple = get_aligned_output_features_output_indices(
out_features=A__ , out_indices=A__ , stage_names=self.stage_names )
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case , snake_case ) -> Tuple:
_UpperCAmelCase = s.rsplit(snake_case , snake_case )
return new.join(snake_case )
def _SCREAMING_SNAKE_CASE ( snake_case ) -> Union[str, Any]:
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() )
def _SCREAMING_SNAKE_CASE ( snake_case ) -> int:
_UpperCAmelCase = {}
_UpperCAmelCase = ["""group_1""", """group_2""", """group_3""", """group_4"""]
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
_UpperCAmelCase = key.replace(f"{group_key}." , f"{group_key}.group." )
if "res_path" in key:
_UpperCAmelCase = key.replace("""res_path.""" , """res_path.path.""" )
if key.endswith(""".w""" ):
_UpperCAmelCase = rreplace(snake_case , """.w""" , """.weight""" , 1 )
if key.endswith(""".b""" ):
_UpperCAmelCase = rreplace(snake_case , """.b""" , """.bias""" , 1 )
_UpperCAmelCase = value.float()
return upgrade
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case , snake_case=None , snake_case=True ) -> str:
from dall_e import Encoder
_UpperCAmelCase = Encoder()
if os.path.exists(snake_case ):
_UpperCAmelCase = torch.load(snake_case )
else:
_UpperCAmelCase = torch.hub.load_state_dict_from_url(snake_case )
if isinstance(snake_case , snake_case ):
_UpperCAmelCase = ckpt.state_dict()
encoder.load_state_dict(snake_case )
if config_path is not None:
_UpperCAmelCase = FlavaImageCodebookConfig.from_pretrained(snake_case )
else:
_UpperCAmelCase = FlavaImageCodebookConfig()
_UpperCAmelCase = FlavaImageCodebook(snake_case ).eval()
_UpperCAmelCase = encoder.state_dict()
_UpperCAmelCase = upgrade_state_dict(snake_case )
hf_model.load_state_dict(snake_case )
_UpperCAmelCase = hf_model.state_dict()
_UpperCAmelCase = count_parameters(snake_case )
_UpperCAmelCase = count_parameters(snake_case )
assert torch.allclose(snake_case , snake_case , atol=1E-3 )
if save_checkpoint:
hf_model.save_pretrained(snake_case )
else:
return hf_state_dict
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
a = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
import itertools
import string
from collections.abc import Generator, Iterable
def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk
def prepare_input(dirty: str) -> str:
    # upper-case the letters and pad repeated letters / odd length with "X"
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""
    if len(dirty) < 2:
        return dirty
    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean) & 1:
        clean += "X"
    return clean
def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)
    return table
def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]
    return ciphertext
def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)
        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]
    return plaintext
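# A quick round-trip check of the cipher above; the key and message are chosen
# arbitrarily for illustration. Note that decode() returns the prepared
# plaintext (upper-cased, non-letters stripped, X-padded), not the exact input.
if __name__ == "__main__":
    key = "playfair example"
    message = "Hide the gold in the tree stump"
    secret = encode(message, key)
    print(secret)
    print(decode(secret, key))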
from ...processing_utils import ProcessorMixin
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[str] = '''SpeechT5FeatureExtractor'''
lowerCamelCase : Optional[Any] = '''SpeechT5Tokenizer'''
def __init__( self : int , _lowercase : List[str] , _lowercase : str ):
super().__init__(_lowercase , _lowercase )
def __call__( self : Dict , *_lowercase : Any , **_lowercase : Tuple ):
SCREAMING_SNAKE_CASE__ : List[Any] = kwargs.pop('''audio''' , _lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = kwargs.pop('''text''' , _lowercase )
SCREAMING_SNAKE_CASE__ : Dict = kwargs.pop('''text_target''' , _lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = kwargs.pop('''audio_target''' , _lowercase )
SCREAMING_SNAKE_CASE__ : int = kwargs.pop('''sampling_rate''' , _lowercase )
if audio is not None and text is not None:
raise ValueError(
'''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''' )
if audio_target is not None and text_target is not None:
raise ValueError(
'''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''' )
if audio is not None:
SCREAMING_SNAKE_CASE__ : Dict = self.feature_extractor(_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase )
elif text is not None:
SCREAMING_SNAKE_CASE__ : int = self.tokenizer(_lowercase , **_lowercase )
else:
SCREAMING_SNAKE_CASE__ : Tuple = None
if audio_target is not None:
SCREAMING_SNAKE_CASE__ : str = self.feature_extractor(audio_target=_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = targets['''input_values''']
elif text_target is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenizer(_lowercase , **_lowercase )
SCREAMING_SNAKE_CASE__ : str = targets['''input_ids''']
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
if inputs is None:
return targets
if targets is not None:
SCREAMING_SNAKE_CASE__ : List[str] = labels
SCREAMING_SNAKE_CASE__ : List[Any] = targets.get('''attention_mask''' )
if decoder_attention_mask is not None:
SCREAMING_SNAKE_CASE__ : List[str] = decoder_attention_mask
return inputs
def lowercase__ ( self : List[str] , *_lowercase : Optional[Any] , **_lowercase : Dict ):
SCREAMING_SNAKE_CASE__ : Optional[int] = kwargs.pop('''input_values''' , _lowercase )
SCREAMING_SNAKE_CASE__ : Any = kwargs.pop('''input_ids''' , _lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = kwargs.pop('''labels''' , _lowercase )
if input_values is not None and input_ids is not None:
raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''' )
if input_values is not None:
SCREAMING_SNAKE_CASE__ : str = self.feature_extractor.pad(_lowercase , *_lowercase , **_lowercase )
elif input_ids is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer.pad(_lowercase , **_lowercase )
else:
SCREAMING_SNAKE_CASE__ : str = None
if labels is not None:
if "input_ids" in labels or (isinstance(_lowercase , _lowercase ) and "input_ids" in labels[0]):
SCREAMING_SNAKE_CASE__ : int = self.tokenizer.pad(_lowercase , **_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = targets['''input_ids''']
else:
SCREAMING_SNAKE_CASE__ : List[Any] = self.feature_extractor.feature_size
SCREAMING_SNAKE_CASE__ : str = self.feature_extractor.num_mel_bins
SCREAMING_SNAKE_CASE__ : Optional[int] = self.feature_extractor.pad(_lowercase , *_lowercase , **_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = feature_size_hack
SCREAMING_SNAKE_CASE__ : Optional[Any] = targets['''input_values''']
else:
SCREAMING_SNAKE_CASE__ : Dict = None
if inputs is None:
return targets
if targets is not None:
SCREAMING_SNAKE_CASE__ : int = labels
SCREAMING_SNAKE_CASE__ : Dict = targets.get('''attention_mask''' )
if decoder_attention_mask is not None:
SCREAMING_SNAKE_CASE__ : Any = decoder_attention_mask
return inputs
def lowercase__ ( self : Optional[int] , *_lowercase : Any , **_lowercase : Optional[Any] ):
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def lowercase__ ( self : List[str] , *_lowercase : Any , **_lowercase : List[str] ):
return self.tokenizer.decode(*_lowercase , **_lowercase )
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class UpperCamelCase_ ( snake_case_ ):
'''simple docstring'''
lowerCAmelCase = 0
lowerCAmelCase = False
lowerCAmelCase = 3.0
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _UpperCamelCase ( self ) -> List[Any]:
# If no defaults are changed, `to_kwargs` returns an empty dict.
self.assertDictEqual(MockClass().to_kwargs() , {} )
self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {'a': 2} )
self.assertDictEqual(MockClass(a=2 , b=a ).to_kwargs() , {'a': 2, 'b': True} )
self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {'a': 2, 'c': 2.25} )
@require_cuda
def _UpperCamelCase ( self ) -> str:
# If no defaults are changed, `to_kwargs` returns an empty dict.
snake_case_ = GradScalerKwargs(init_scale=10_24 , growth_factor=2 )
AcceleratorState._reset_state()
snake_case_ = Accelerator(mixed_precision='fp16' , kwargs_handlers=[scaler_handler] )
print(accelerator.use_fpaa )
snake_case_ = accelerator.scaler
# Check the kwargs have been applied
self.assertEqual(scaler._init_scale , 1_024.0 )
self.assertEqual(scaler._growth_factor , 2.0 )
# Check the other values are at the default
self.assertEqual(scaler._backoff_factor , 0.5 )
self.assertEqual(scaler._growth_interval , 20_00 )
self.assertEqual(scaler._enabled , a )
@require_multi_gpu
def _UpperCamelCase ( self ) -> Optional[Any]:
snake_case_ = ['torchrun', F'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
execute_subprocess_async(a , env=os.environ.copy() )
if __name__ == "__main__":
lowercase = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
lowercase = Accelerator(kwargs_handlers=[ddp_scaler])
lowercase = torch.nn.Linear(100, 200)
lowercase = accelerator.prepare(model)
# Check the values changed in kwargs
lowercase = ""
lowercase = model.bucket_bytes_cap // (1024 * 1024)
if observed_bucket_cap_map != 15:
error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n"
if model.find_unused_parameters is not True:
error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"
# Check the values of the defaults
if model.dim != 0:
error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
if model.broadcast_buffers is not True:
error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
if model.gradient_as_bucket_view is not False:
error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class _a :
_UpperCamelCase: Dict = None
def _snake_case ( self ) -> List[Any]:
lowerCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_dict )
lowerCAmelCase : Tuple = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , UpperCamelCase__ )
def _snake_case ( self ) -> str:
lowerCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase : Any = os.path.join(UpperCamelCase__ , """feat_extract.json""" )
feat_extract_first.to_json_file(UpperCamelCase__ )
lowerCAmelCase : Dict = self.feature_extraction_class.from_json_file(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _snake_case ( self ) -> List[str]:
lowerCAmelCase : Any = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase : Any = feat_extract_first.save_pretrained(UpperCamelCase__ )[0]
check_json_file_has_correct_format(UpperCamelCase__ )
lowerCAmelCase : Optional[int] = self.feature_extraction_class.from_pretrained(UpperCamelCase__ )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def _snake_case ( self ) -> Tuple:
lowerCAmelCase : str = self.feature_extraction_class()
self.assertIsNotNone(UpperCamelCase__ )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase : Optional[int] =logging.get_logger(__name__)
lowerCAmelCase : Optional[int] ={
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class _a ( snake_case_ ):
_UpperCamelCase: Tuple = "transfo-xl"
_UpperCamelCase: str = ["mems"]
_UpperCamelCase: Dict = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , lowercase_=267735 , lowercase_=[20000, 40000, 200000] , lowercase_=1024 , lowercase_=1024 , lowercase_=16 , lowercase_=64 , lowercase_=4096 , lowercase_=4 , lowercase_=False , lowercase_=18 , lowercase_=1600 , lowercase_=1000 , lowercase_=True , lowercase_=True , lowercase_=0 , lowercase_=-1 , lowercase_=True , lowercase_=0.1 , lowercase_=0.0 , lowercase_=True , lowercase_="normal" , lowercase_=0.0_1 , lowercase_=0.0_1 , lowercase_=0.0_2 , lowercase_=1e-5 , lowercase_=0 , **lowercase_ , ) -> Optional[int]:
lowerCAmelCase : List[str] = vocab_size
lowerCAmelCase : Union[str, Any] = []
self.cutoffs.extend(lowercase_ )
if proj_share_all_but_first:
lowerCAmelCase : Optional[int] = [False] + [True] * len(self.cutoffs )
else:
lowerCAmelCase : List[str] = [False] + [False] * len(self.cutoffs )
lowerCAmelCase : Optional[int] = d_model
lowerCAmelCase : List[Any] = d_embed
lowerCAmelCase : Union[str, Any] = d_head
lowerCAmelCase : List[Any] = d_inner
lowerCAmelCase : Optional[int] = div_val
lowerCAmelCase : List[Any] = pre_lnorm
lowerCAmelCase : Dict = n_layer
lowerCAmelCase : Tuple = n_head
lowerCAmelCase : Any = mem_len
lowerCAmelCase : Union[str, Any] = same_length
lowerCAmelCase : List[Any] = attn_type
lowerCAmelCase : int = clamp_len
lowerCAmelCase : List[str] = sample_softmax
lowerCAmelCase : Optional[int] = adaptive
lowerCAmelCase : Dict = dropout
lowerCAmelCase : Optional[Any] = dropatt
lowerCAmelCase : List[str] = untie_r
lowerCAmelCase : List[str] = init
lowerCAmelCase : Tuple = init_range
lowerCAmelCase : str = proj_init_std
lowerCAmelCase : str = init_std
lowerCAmelCase : Optional[int] = layer_norm_epsilon
super().__init__(eos_token_id=lowercase_ , **lowercase_ )
@property
def _snake_case ( self ) -> Optional[Any]:
# Message copied from Transformer-XL documentation
logger.info(f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
return -1
@max_position_embeddings.setter
def _snake_case ( self , lowercase_ ) -> Dict:
# Message copied from Transformer-XL documentation
raise NotImplementedError(
f"""The model {self.model_type} is one of the few models that has no sequence length limit.""" )
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__A : Union[str, Any] = logging.getLogger(__name__)
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__magic_name__ : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""})
__magic_name__ : Optional[str] = field(
default=_UpperCamelCase , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""})
__magic_name__ : Optional[str] = field(
default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""})
__magic_name__ : Optional[str] = field(
default=_UpperCamelCase , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""})
__magic_name__ : bool = field(default=_UpperCamelCase , metadata={"""help""": """Set this flag to use fast tokenization."""})
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__magic_name__ : Optional[str] = field(
default=_UpperCamelCase , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
__magic_name__ : str = field(
metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""})
__magic_name__ : Optional[str] = field(
default=_UpperCamelCase , metadata={"""help""": """Path to a file containing all labels. If not specified, CoNLL-2003 labels are used."""} , )
__magic_name__ : int = field(
default=1_28 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
__magic_name__ : bool = field(
default=_UpperCamelCase , metadata={"""help""": """Overwrite the cached training and evaluation sets"""})
def lowercase ( ):
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A__ : int =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A__ , A__ , A__ : Optional[Any] =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A__ , A__ , A__ : Union[str, Any] =parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
" --overwrite_output_dir to overcome." )
A__ : List[Any] =import_module("tasks" )
try:
A__ : Tuple =getattr(UpperCamelCase , model_args.task_type )
A__ : TokenClassificationTask =token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , UpperCamelCase )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
A__ : int =token_classification_task.get_labels(data_args.labels )
A__ : Dict[int, str] =dict(enumerate(UpperCamelCase ) )
A__ : Optional[int] =len(UpperCamelCase )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A__ : Optional[Any] =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCamelCase , idalabel=UpperCamelCase , labelaid={label: i for i, label in enumerate(UpperCamelCase )} , cache_dir=model_args.cache_dir , )
A__ : Any =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
A__ : Optional[int] =AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=UpperCamelCase , cache_dir=model_args.cache_dir , )
# Get datasets
A__ : Any =(
TokenClassificationDataset(
token_classification_task=UpperCamelCase , data_dir=data_args.data_dir , tokenizer=UpperCamelCase , labels=UpperCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
A__ : Union[str, Any] =(
TokenClassificationDataset(
token_classification_task=UpperCamelCase , data_dir=data_args.data_dir , tokenizer=UpperCamelCase , labels=UpperCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(UpperCamelCase : np.ndarray , UpperCamelCase : np.ndarray ) -> Tuple[List[int], List[int]]:
A__ : List[str] =np.argmax(UpperCamelCase , axis=2 )
A__ , A__ : Any =preds.shape
A__ : List[str] =[[] for _ in range(UpperCamelCase )]
A__ : Union[str, Any] =[[] for _ in range(UpperCamelCase )]
for i in range(UpperCamelCase ):
for j in range(UpperCamelCase ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(UpperCamelCase : EvalPrediction ) -> Dict:
A__ , A__ : Any =align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(UpperCamelCase , UpperCamelCase ),
"precision": precision_score(UpperCamelCase , UpperCamelCase ),
"recall": recall_score(UpperCamelCase , UpperCamelCase ),
"f1": fa_score(UpperCamelCase , UpperCamelCase ),
}
# Data collator
A__ : List[str] =DataCollatorWithPadding(UpperCamelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
A__ : Optional[Any] =Trainer(
model=UpperCamelCase , args=UpperCamelCase , train_dataset=UpperCamelCase , eval_dataset=UpperCamelCase , compute_metrics=UpperCamelCase , data_collator=UpperCamelCase , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
A__ : List[Any] ={}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
A__ : List[str] =trainer.evaluate()
A__ : Optional[int] =os.path.join(training_args.output_dir , "eval_results.txt" )
if trainer.is_world_process_zero():
with open(UpperCamelCase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(" %s = %s" , UpperCamelCase , UpperCamelCase )
writer.write("%s = %s\n" % (key, value) )
results.update(UpperCamelCase )
# Predict
if training_args.do_predict:
A__ : List[Any] =TokenClassificationDataset(
token_classification_task=UpperCamelCase , data_dir=data_args.data_dir , tokenizer=UpperCamelCase , labels=UpperCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
A__ , A__ , A__ : Union[str, Any] =trainer.predict(UpperCamelCase )
A__ , A__ : Optional[int] =align_predictions(UpperCamelCase , UpperCamelCase )
A__ : List[str] =os.path.join(training_args.output_dir , "test_results.txt" )
if trainer.is_world_process_zero():
with open(UpperCamelCase , "w" ) as writer:
for key, value in metrics.items():
logger.info(" %s = %s" , UpperCamelCase , UpperCamelCase )
writer.write("%s = %s\n" % (key, value) )
# Save predictions
A__ : Any =os.path.join(training_args.output_dir , "test_predictions.txt" )
if trainer.is_world_process_zero():
with open(UpperCamelCase , "w" ) as writer:
with open(os.path.join(data_args.data_dir , "test.txt" ) , "r" ) as f:
token_classification_task.write_predictions_to_file(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return results
def lowercase ( UpperCamelCase : List[Any] ):
"""simple docstring"""
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
"""simple docstring"""
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class __lowerCAmelCase ( unittest.TestCase):
'''simple docstring'''
def _UpperCAmelCase ( self : List[Any] ):
A__ : Tuple =torch.nn.Linear(10 , 10 )
A__ : List[str] =torch.optim.SGD(model.parameters() , 0.1 )
A__ : Union[str, Any] =Accelerator()
A__ : str =accelerator.prepare(UpperCamelCase__ )
try:
pickle.loads(pickle.dumps(UpperCamelCase__ ) )
except Exception as e:
self.fail(F'''Accelerated optimizer pickling failed with {e}''' )
AcceleratorState._reset_state()
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def __lowerCamelCase ( snake_case__ ,snake_case__ ,snake_case__=None ,snake_case__=None ) -> str:
"""simple docstring"""
if attention_mask is None:
_SCREAMING_SNAKE_CASE = tf.cast(tf.math.not_equal(lowercase__ ,config.pad_token_id ) ,tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class __UpperCAmelCase :
__snake_case : Tuple = OPTConfig
__snake_case : Any = {}
__snake_case : Tuple = "gelu"
def __init__( self: Union[str, Any] , UpperCAmelCase_: Any , UpperCAmelCase_: List[str]=13 , UpperCAmelCase_: Tuple=7 , UpperCAmelCase_: List[Any]=True , UpperCAmelCase_: Tuple=False , UpperCAmelCase_: Dict=99 , UpperCAmelCase_: List[str]=16 , UpperCAmelCase_: Optional[Any]=2 , UpperCAmelCase_: Optional[Any]=4 , UpperCAmelCase_: str=4 , UpperCAmelCase_: Optional[int]="gelu" , UpperCAmelCase_: int=0.1 , UpperCAmelCase_: Dict=0.1 , UpperCAmelCase_: List[str]=20 , UpperCAmelCase_: Tuple=2 , UpperCAmelCase_: Optional[int]=1 , UpperCAmelCase_: str=0 , UpperCAmelCase_: Union[str, Any]=16 , UpperCAmelCase_: List[Any]=16 , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = eos_token_id
_SCREAMING_SNAKE_CASE = pad_token_id
_SCREAMING_SNAKE_CASE = bos_token_id
_SCREAMING_SNAKE_CASE = embed_dim
_SCREAMING_SNAKE_CASE = word_embed_proj_dim
_SCREAMING_SNAKE_CASE = False
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_SCREAMING_SNAKE_CASE = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_SCREAMING_SNAKE_CASE = tf.concat([input_ids, eos_tensor] , axis=1 )
_SCREAMING_SNAKE_CASE = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowerCAmelCase_ , **self.config_updates , )
_SCREAMING_SNAKE_CASE = prepare_opt_inputs_dict(lowerCAmelCase_ , lowerCAmelCase_ )
return config, inputs_dict
def UpperCamelCase ( self: str , UpperCAmelCase_: List[Any] , UpperCAmelCase_: int ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFOPTModel(config=lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = inputs_dict["""input_ids"""]
_SCREAMING_SNAKE_CASE = input_ids[:1, :]
_SCREAMING_SNAKE_CASE = inputs_dict["""attention_mask"""][:1, :]
_SCREAMING_SNAKE_CASE = 1
# first forward pass
_SCREAMING_SNAKE_CASE = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , use_cache=lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_SCREAMING_SNAKE_CASE = ids_tensor((self.batch_size, 3) , config.vocab_size )
_SCREAMING_SNAKE_CASE = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_SCREAMING_SNAKE_CASE = tf.concat([input_ids, next_tokens] , axis=-1 )
_SCREAMING_SNAKE_CASE = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_SCREAMING_SNAKE_CASE = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ )[0]
_SCREAMING_SNAKE_CASE = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , past_key_values=lowerCAmelCase_ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_SCREAMING_SNAKE_CASE = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_SCREAMING_SNAKE_CASE = output_from_no_past[:, -3:, random_slice_idx]
_SCREAMING_SNAKE_CASE = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCAmelCase_ , lowerCAmelCase_ , rtol=1E-3 )
@require_tf
class __UpperCAmelCase (__a ,__a ,unittest.TestCase ):
__snake_case : Tuple = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
__snake_case : Dict = (TFOPTForCausalLM,) if is_tf_available() else ()
__snake_case : Tuple = (
{"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
)
__snake_case : List[Any] = False
__snake_case : List[str] = False
__snake_case : Dict = False
__snake_case : Tuple = 10
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFOPTModelTester(self )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=lowerCAmelCase_ )
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase_ )
def UpperCamelCase ( self: List[str] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(UpperCAmelCase_: Optional[int] , UpperCAmelCase_: Any ):
if hasattr(lowerCAmelCase_ , """weight""" ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(lowerCAmelCase_ , """weight""" ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
_SCREAMING_SNAKE_CASE = model_class(config=lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = _get_word_embedding_weight(lowerCAmelCase_ , model.get_input_embeddings() )
_SCREAMING_SNAKE_CASE = _get_word_embedding_weight(lowerCAmelCase_ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = _get_word_embedding_weight(lowerCAmelCase_ , model.get_input_embeddings() )
_SCREAMING_SNAKE_CASE = _get_word_embedding_weight(lowerCAmelCase_ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
_SCREAMING_SNAKE_CASE = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , lowerCAmelCase_ )
# check that weights remain the same after resizing
_SCREAMING_SNAKE_CASE = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_SCREAMING_SNAKE_CASE = False
self.assertTrue(lowerCAmelCase_ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_SCREAMING_SNAKE_CASE = False
self.assertTrue(lowerCAmelCase_ )
def __lowerCamelCase ( snake_case__ ) -> int:
"""simple docstring"""
return tf.constant(lowercase__ ,dtype=tf.intaa )
@require_tf
class __UpperCAmelCase (unittest.TestCase ):
__snake_case : Optional[Any] = 99
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = tf.ones((4, 1) , dtype=tf.intaa ) * 2
_SCREAMING_SNAKE_CASE = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
_SCREAMING_SNAKE_CASE = input_ids.shape[0]
_SCREAMING_SNAKE_CASE = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class __UpperCAmelCase (unittest.TestCase ):
@slow
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFOPTModel.from_pretrained("""facebook/opt-350m""" )
_SCREAMING_SNAKE_CASE = _long_tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
_SCREAMING_SNAKE_CASE = tf.not_equal(lowerCAmelCase_ , model.config.pad_token_id )
with tf.GradientTape():
_SCREAMING_SNAKE_CASE = model(input_ids=lowerCAmelCase_ , attention_mask=lowerCAmelCase_ ).last_hidden_state
_SCREAMING_SNAKE_CASE = (1, 11, 512)
self.assertEqual(output.shape , lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = tf.constant(
[[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] )
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=4E-3 ) )
_SCREAMING_SNAKE_CASE = tf.function(lowerCAmelCase_ , jit_compile=lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = xla_generate(lowerCAmelCase_ , lowerCAmelCase_ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=4E-2 ) )
@require_tf
@slow
class __UpperCAmelCase (unittest.TestCase ):
def UpperCamelCase ( self: Tuple ):
'''simple docstring'''
super().setUp()
_SCREAMING_SNAKE_CASE = """facebook/opt-350m"""
def UpperCamelCase ( self: Union[str, Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TFOPTForCausalLM.from_pretrained(self.path_model )
_SCREAMING_SNAKE_CASE = GPTaTokenizer.from_pretrained(self.path_model )
_SCREAMING_SNAKE_CASE = [
"""Today is a beautiful day and I want to""",
"""In the city of""",
"""Paris is the capital of France and""",
"""Computers and mobile phones have taken""",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
_SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase_ , return_tensors="""tf""" , padding=lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
_SCREAMING_SNAKE_CASE = tf.constant(
[
[1.38_51, -13.8_923, -10.5_229, -10.7_533, -0.23_09, -10.2_384, -0.53_65, -9.09_47, -5.16_70],
[-4.70_73, -10.6_276, -3.94_15, -21.5_242, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22],
[0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.1_650, 1.41_46, -9.02_18, -0.27_03, -0.27_03],
[6.47_83, -1.99_13, -10.7_926, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77],
] )
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-4 ) )
_SCREAMING_SNAKE_CASE = tf.function(lowerCAmelCase_ , jit_compile=lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(lowerCAmelCase_ , lowerCAmelCase_ , atol=1E-4 ) )
@require_tf
@slow
class __UpperCAmelCase (unittest.TestCase ):
@property
def UpperCamelCase ( self: Dict ):
'''simple docstring'''
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """facebook/opt-125m"""
_SCREAMING_SNAKE_CASE = [
"""Today is a beautiful day and I want to""",
"""In the city of New York, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
for prompt in self.prompts:
_SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase_ , return_tensors="""tf""" ).input_ids
_SCREAMING_SNAKE_CASE = model.generate(lowerCAmelCase_ , max_length=10 )
_SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
def UpperCamelCase ( self: str ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """facebook/opt-350m"""
_SCREAMING_SNAKE_CASE = GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = """left"""
# use different length sentences to test batching
_SCREAMING_SNAKE_CASE = [
"""Hello, my dog is a little""",
"""Today, I""",
]
_SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase_ , return_tensors="""tf""" , padding=lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = inputs["""input_ids"""]
_SCREAMING_SNAKE_CASE = model.generate(input_ids=lowerCAmelCase_ , attention_mask=inputs["""attention_mask"""] )
_SCREAMING_SNAKE_CASE = tokenizer(sentences[0] , return_tensors="""tf""" ).input_ids
_SCREAMING_SNAKE_CASE = model.generate(input_ids=lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["""attention_mask"""][-1] , tf.intaa ) )
_SCREAMING_SNAKE_CASE = tokenizer(sentences[1] , return_tensors="""tf""" ).input_ids
_SCREAMING_SNAKE_CASE = model.generate(input_ids=lowerCAmelCase_ , max_length=model.config.max_length - num_paddings )
_SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = tokenizer.decode(output_padded[0] , skip_special_tokens=lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = [
"""Hello, my dog is a little bit of a dork.\nI'm a little bit""",
"""Today, I was in the middle of a conversation with a friend about the""",
]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , [non_padded_sentence, padded_sentence] )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = """facebook/opt-350m"""
_SCREAMING_SNAKE_CASE = [
"""Today is a beautiful day and I want to""",
"""In the city of San Francisco, the city""",
"""Paris is the capital of France and the capital""",
"""Computers and mobile phones have taken over the""",
]
_SCREAMING_SNAKE_CASE = []
_SCREAMING_SNAKE_CASE = GPTaTokenizer.from_pretrained(lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE = TFOPTForCausalLM.from_pretrained(lowerCAmelCase_ )
for prompt in self.prompts:
_SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase_ , return_tensors="""tf""" ).input_ids
_SCREAMING_SNAKE_CASE = model.generate(lowerCAmelCase_ , max_length=10 )
_SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
predicted_outputs += generated_string
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
| 719 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class __UpperCAmelCase :
def __init__( self: Optional[Any] , UpperCAmelCase_: Tuple , UpperCAmelCase_: str=99 , UpperCAmelCase_: Optional[int]=13 , UpperCAmelCase_: Dict=16 , UpperCAmelCase_: Union[str, Any]=7 , UpperCAmelCase_: int=True , UpperCAmelCase_: int=True , UpperCAmelCase_: Any=True , UpperCAmelCase_: List[Any]=False , UpperCAmelCase_: str=True , UpperCAmelCase_: Optional[Any]=2 , UpperCAmelCase_: int=32 , UpperCAmelCase_: Union[str, Any]=4 , UpperCAmelCase_: List[Any]=4 , UpperCAmelCase_: Optional[int]=30 , UpperCAmelCase_: Dict=0 , UpperCAmelCase_: List[str]=1 , UpperCAmelCase_: Optional[Any]=2 , UpperCAmelCase_: Union[str, Any]=None , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = parent
_SCREAMING_SNAKE_CASE = batch_size
_SCREAMING_SNAKE_CASE = decoder_seq_length
# For common tests
_SCREAMING_SNAKE_CASE = self.decoder_seq_length
_SCREAMING_SNAKE_CASE = is_training
_SCREAMING_SNAKE_CASE = use_attention_mask
_SCREAMING_SNAKE_CASE = use_labels
_SCREAMING_SNAKE_CASE = vocab_size
_SCREAMING_SNAKE_CASE = d_model
_SCREAMING_SNAKE_CASE = d_model
_SCREAMING_SNAKE_CASE = decoder_layers
_SCREAMING_SNAKE_CASE = decoder_layers
_SCREAMING_SNAKE_CASE = decoder_ffn_dim
_SCREAMING_SNAKE_CASE = decoder_attention_heads
_SCREAMING_SNAKE_CASE = decoder_attention_heads
_SCREAMING_SNAKE_CASE = eos_token_id
_SCREAMING_SNAKE_CASE = bos_token_id
_SCREAMING_SNAKE_CASE = pad_token_id
_SCREAMING_SNAKE_CASE = decoder_start_token_id
_SCREAMING_SNAKE_CASE = use_cache
_SCREAMING_SNAKE_CASE = max_position_embeddings
_SCREAMING_SNAKE_CASE = None
_SCREAMING_SNAKE_CASE = decoder_seq_length
_SCREAMING_SNAKE_CASE = 2
_SCREAMING_SNAKE_CASE = 1
def UpperCamelCase ( self: Optional[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = None
if self.use_attention_mask:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
_SCREAMING_SNAKE_CASE = None
if self.use_labels:
_SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
_SCREAMING_SNAKE_CASE = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def UpperCamelCase ( self: Tuple , UpperCAmelCase_: Tuple , UpperCAmelCase_: List[str] , UpperCAmelCase_: Union[str, Any] , UpperCAmelCase_: Optional[Any] , ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = True
_SCREAMING_SNAKE_CASE = TrOCRDecoder(config=UpperCAmelCase_ ).to(UpperCAmelCase_ ).eval()
_SCREAMING_SNAKE_CASE = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , use_cache=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , use_cache=UpperCAmelCase_ )
self.parent.assertTrue(len(UpperCAmelCase_ ) == len(UpperCAmelCase_ ) )
self.parent.assertTrue(len(UpperCAmelCase_ ) == len(UpperCAmelCase_ ) + 1 )
_SCREAMING_SNAKE_CASE = outputs["""past_key_values"""]
# create hypothetical next token and extent to next_input_ids
_SCREAMING_SNAKE_CASE = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids and
_SCREAMING_SNAKE_CASE = torch.cat([input_ids, next_tokens] , dim=-1 )
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ )["""last_hidden_state"""]
_SCREAMING_SNAKE_CASE = model(UpperCAmelCase_ , past_key_values=UpperCAmelCase_ )["""last_hidden_state"""]
# select random slice
_SCREAMING_SNAKE_CASE = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_SCREAMING_SNAKE_CASE = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
_SCREAMING_SNAKE_CASE = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(UpperCAmelCase_ , UpperCAmelCase_ , atol=1E-3 )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = config_and_inputs
_SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_torch
class __UpperCAmelCase (_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,unittest.TestCase ):
__snake_case : Optional[Any] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
__snake_case : Optional[Any] = (TrOCRForCausalLM,) if is_torch_available() else ()
__snake_case : Tuple = {"text-generation": TrOCRForCausalLM} if is_torch_available() else {}
__snake_case : str = True
__snake_case : List[str] = False
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = TrOCRStandaloneDecoderModelTester(self , is_training=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=UpperCAmelCase_ )
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase ( self: Optional[int] ):
'''simple docstring'''
pass
def UpperCamelCase ( self: str ):
'''simple docstring'''
pass
def UpperCamelCase ( self: Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCamelCase ( self: List[Any] ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*UpperCAmelCase_ )
def UpperCamelCase ( self: Any ):
'''simple docstring'''
return
@unittest.skip("""The model doesn't support left padding""" ) # and it's not used enough to be worth fixing :)
def UpperCamelCase ( self: Any ):
'''simple docstring'''
pass
| 569 | 0 |
import unittest
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BridgeTowerImageProcessor
class a__ ( unittest.TestCase ):
def __init__( self : Optional[Any] , A_ : Union[str, Any] , A_ : bool = True , A_ : Dict[str, int] = None , A_ : int = 32 , A_ : bool = True , A_ : Union[int, float] = 1 / 2_55 , A_ : bool = True , A_ : bool = True , A_ : Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073] , A_ : Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711] , A_ : bool = True , A_ : Tuple=7 , A_ : Optional[Any]=30 , A_ : Any=4_00 , A_ : int=3 , ) -> Dict:
"""simple docstring"""
lowerCamelCase_: Tuple = parent
lowerCamelCase_: int = do_resize
lowerCamelCase_: List[Any] = size if size is not None else {"""shortest_edge""": 2_88}
lowerCamelCase_: str = size_divisor
lowerCamelCase_: str = do_rescale
lowerCamelCase_: Optional[Any] = rescale_factor
lowerCamelCase_: str = do_normalize
lowerCamelCase_: Optional[Any] = do_center_crop
lowerCamelCase_: str = image_mean
lowerCamelCase_: Tuple = image_std
lowerCamelCase_: Optional[int] = do_pad
lowerCamelCase_: Any = batch_size
lowerCamelCase_: List[str] = num_channels
lowerCamelCase_: Dict = min_resolution
lowerCamelCase_: List[str] = max_resolution
def lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"size_divisor": self.size_divisor,
}
def lowerCAmelCase ( self : Dict , A_ : Dict , A_ : Union[str, Any]=False ) -> Union[str, Any]:
"""simple docstring"""
if not batched:
lowerCamelCase_: Dict = self.size["""shortest_edge"""]
lowerCamelCase_: int = image_inputs[0]
if isinstance(A_ , Image.Image ):
lowerCamelCase_ , lowerCamelCase_: Any = image.size
else:
lowerCamelCase_ , lowerCamelCase_: Optional[Any] = image.shape[1], image.shape[2]
lowerCamelCase_: Dict = size / min(A_ , A_ )
if h < w:
lowerCamelCase_ , lowerCamelCase_: str = size, scale * w
else:
lowerCamelCase_ , lowerCamelCase_: Dict = scale * h, size
lowerCamelCase_: Optional[int] = int((13_33 / 8_00) * size )
if max(A_ , A_ ) > max_size:
lowerCamelCase_: List[str] = max_size / max(A_ , A_ )
lowerCamelCase_: Union[str, Any] = newh * scale
lowerCamelCase_: Optional[Any] = neww * scale
lowerCamelCase_ , lowerCamelCase_: List[Any] = int(newh + 0.5 ), int(neww + 0.5 )
lowerCamelCase_ , lowerCamelCase_: Optional[Any] = (
newh // self.size_divisor * self.size_divisor,
neww // self.size_divisor * self.size_divisor,
)
else:
lowerCamelCase_: List[str] = []
for image in image_inputs:
lowerCamelCase_ , lowerCamelCase_: Union[str, Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCamelCase_: Any = max(A_ , key=lambda A_ : item[0] )[0]
lowerCamelCase_: str = max(A_ , key=lambda A_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class a__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
_A = BridgeTowerImageProcessor if is_vision_available() else None
def lowerCAmelCase ( self : Dict ) -> str:
"""simple docstring"""
lowerCamelCase_: Union[str, Any] = BridgeTowerImageProcessingTester(self )
@property
def lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCAmelCase ( self : int ) -> List[str]:
"""simple docstring"""
lowerCamelCase_: List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ , """image_mean""" ) )
self.assertTrue(hasattr(A_ , """image_std""" ) )
self.assertTrue(hasattr(A_ , """do_normalize""" ) )
self.assertTrue(hasattr(A_ , """do_resize""" ) )
self.assertTrue(hasattr(A_ , """size""" ) )
self.assertTrue(hasattr(A_ , """size_divisor""" ) )
def lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
pass
def lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
# Initialize image processor
lowerCamelCase_: str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase_: Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , Image.Image )
# Test not batched input
lowerCamelCase_: List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCamelCase_ , lowerCamelCase_: Tuple = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_: Tuple = image_processing(A_ , return_tensors="""pt""" ).pixel_values
lowerCamelCase_ , lowerCamelCase_: Tuple = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
# Initialize image processor
lowerCamelCase_: Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase_: Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , np.ndarray )
# Test not batched input
lowerCamelCase_: List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCamelCase_ , lowerCamelCase_: str = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_: List[Any] = image_processing(A_ , return_tensors="""pt""" ).pixel_values
lowerCamelCase_ , lowerCamelCase_: List[str] = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def lowerCAmelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
# Initialize image processor
lowerCamelCase_: List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase_: int = prepare_image_inputs(self.image_processor_tester , equal_resolution=A_ , torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ , torch.Tensor )
# Test not batched input
lowerCamelCase_: Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
lowerCamelCase_ , lowerCamelCase_: Tuple = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase_: int = image_processing(A_ , return_tensors="""pt""" ).pixel_values
lowerCamelCase_ , lowerCamelCase_: str = self.image_processor_tester.get_expected_values(A_ , batched=A_ )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
| 423 | from __future__ import annotations
from collections.abc import MutableSequence
class a__ :
def __init__( self : Optional[Any] , A_ : int , A_ : MutableSequence[float] ) -> None:
"""simple docstring"""
if len(A_ ) != degree + 1:
raise ValueError(
"""The number of coefficients should be equal to the degree + 1.""" )
lowerCamelCase_: list[float] = list(A_ )
lowerCamelCase_: Any = degree
def __add__( self : str , A_ : Polynomial ) -> Polynomial:
"""simple docstring"""
if self.degree > polynomial_a.degree:
lowerCamelCase_: List[Any] = self.coefficients[:]
for i in range(polynomial_a.degree + 1 ):
coefficients[i] += polynomial_a.coefficients[i]
return Polynomial(self.degree , A_ )
else:
lowerCamelCase_: Tuple = polynomial_a.coefficients[:]
for i in range(self.degree + 1 ):
coefficients[i] += self.coefficients[i]
return Polynomial(polynomial_a.degree , A_ )
def __sub__( self : Any , A_ : Polynomial ) -> Polynomial:
"""simple docstring"""
return self + polynomial_a * Polynomial(0 , [-1] )
def __neg__( self : List[Any] ) -> Polynomial:
"""simple docstring"""
return Polynomial(self.degree , [-c for c in self.coefficients] )
def __mul__( self : int , A_ : Polynomial ) -> Polynomial:
"""simple docstring"""
lowerCamelCase_: list[float] = [0] * (self.degree + polynomial_a.degree + 1)
for i in range(self.degree + 1 ):
for j in range(polynomial_a.degree + 1 ):
coefficients[i + j] += (
self.coefficients[i] * polynomial_a.coefficients[j]
)
return Polynomial(self.degree + polynomial_a.degree , A_ )
def lowerCAmelCase ( self : Union[str, Any] , A_ : int | float ) -> int | float:
"""simple docstring"""
lowerCamelCase_: int | float = 0
for i in range(self.degree + 1 ):
result += self.coefficients[i] * (substitution**i)
return result
def __str__( self : Optional[Any] ) -> str:
"""simple docstring"""
lowerCamelCase_: Union[str, Any] = """"""
for i in range(self.degree , -1 , -1 ):
if self.coefficients[i] == 0:
continue
elif self.coefficients[i] > 0:
if polynomial:
polynomial += " + "
else:
polynomial += " - "
if i == 0:
polynomial += str(abs(self.coefficients[i] ) )
elif i == 1:
polynomial += str(abs(self.coefficients[i] ) ) + "x"
else:
polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(A_ )
return polynomial
def __repr__( self : List[Any] ) -> str:
"""simple docstring"""
return self.__str__()
def lowerCAmelCase ( self : Optional[int] ) -> Polynomial:
"""simple docstring"""
lowerCamelCase_: list[float] = [0] * self.degree
for i in range(self.degree ):
lowerCamelCase_: Dict = self.coefficients[i + 1] * (i + 1)
return Polynomial(self.degree - 1 , A_ )
def lowerCAmelCase ( self : Any , A_ : int | float = 0 ) -> Polynomial:
"""simple docstring"""
lowerCamelCase_: list[float] = [0] * (self.degree + 2)
lowerCamelCase_: Union[str, Any] = constant
for i in range(self.degree + 1 ):
lowerCamelCase_: Tuple = self.coefficients[i] / (i + 1)
return Polynomial(self.degree + 1 , A_ )
def __eq__( self : Union[str, Any] , A_ : object ) -> bool:
"""simple docstring"""
if not isinstance(A_ , A_ ):
return False
if self.degree != polynomial_a.degree:
return False
for i in range(self.degree + 1 ):
if self.coefficients[i] != polynomial_a.coefficients[i]:
return False
return True
def __ne__( self : List[str] , A_ : object ) -> bool:
"""simple docstring"""
return not self.__eq__(A_ )
| 423 | 1 |
import os
from pathlib import Path
def snake_case_ ( ):
'''simple docstring'''
from torch.utils.cpp_extension import load
_lowerCAmelCase =Path(lowercase__ ).resolve().parent.parent.parent / """kernels""" / """deformable_detr"""
_lowerCAmelCase =[
root / filename
for filename in [
"""vision.cpp""",
os.path.join("""cpu""" , """ms_deform_attn_cpu.cpp""" ),
os.path.join("""cuda""" , """ms_deform_attn_cuda.cu""" ),
]
]
load(
"""MultiScaleDeformableAttention""" , lowercase__ , with_cuda=lowercase__ , extra_include_paths=[str(lowercase__ )] , extra_cflags=["""-DWITH_CUDA=1"""] , extra_cuda_cflags=[
"""-DCUDA_HAS_FP16=1""",
"""-D__CUDA_NO_HALF_OPERATORS__""",
"""-D__CUDA_NO_HALF_CONVERSIONS__""",
"""-D__CUDA_NO_HALF2_OPERATORS__""",
] , )
import MultiScaleDeformableAttention as MSDA
return MSDA
| 706 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__SCREAMING_SNAKE_CASE : Optional[Any] = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
__SCREAMING_SNAKE_CASE : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 149 | 0 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__a : Any = logging.getLogger(__name__)
def __magic_name__ ( lowercase_ , lowercase_ , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = False , ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = bnb_quantization_config.load_in_abit
UpperCamelCase = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
" make sure you have the latest version of `bitsandbytes` installed." )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
"make sure you have the latest version of `bitsandbytes` installed." )
UpperCamelCase = []
# custom device map
if isinstance(lowercase_ , lowercase_ ) and len(device_map.keys() ) > 1:
UpperCamelCase = [key for key, value in device_map.items() if value in ["disk", "cpu"]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
UpperCamelCase = get_keys_to_not_convert(lowercase_ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(lowercase_ )
UpperCamelCase = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
UpperCamelCase = []
UpperCamelCase = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(lowercase_ )
# compatibility with peft
UpperCamelCase = load_in_abit
UpperCamelCase = load_in_abit
UpperCamelCase = get_parameter_device(lowercase_ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"It is not recommended to quantize a loaded model. "
"The model should be instantiated under the `init_empty_weights` context manager." )
UpperCamelCase = replace_with_bnb_layers(lowercase_ , lowercase_ , modules_to_not_convert=lowercase_ )
# convert param to the right dtype
UpperCamelCase = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
UpperCamelCase = name.replace(".weight" , "" ).replace(".bias" , "" )
UpperCamelCase = getattr(lowercase_ , lowercase_ , lowercase_ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(lowercase_ ):
param.to(lowercase_ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization." )
logger.info(
f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
"We move the model to cuda." )
return model
elif weights_location is None:
raise RuntimeError(
f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
UpperCamelCase = replace_with_bnb_layers(
lowercase_ , lowercase_ , modules_to_not_convert=lowercase_ )
UpperCamelCase = get_quantized_model_device_map(
lowercase_ , lowercase_ , lowercase_ , max_memory=lowercase_ , no_split_module_classes=lowercase_ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
UpperCamelCase = True
UpperCamelCase = any(x in list(device_map.values() ) for x in ["cpu", "disk"] )
load_checkpoint_in_model(
lowercase_ , lowercase_ , lowercase_ , dtype=bnb_quantization_config.torch_dtype , offload_folder=lowercase_ , offload_state_dict=lowercase_ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(lowercase_ , device_map=lowercase_ , offload_dir=lowercase_ )
def __magic_name__ ( lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=None ) -> List[Any]:
'''simple docstring'''
if device_map is None:
if torch.cuda.is_available():
UpperCamelCase = {"": torch.cuda.current_device()}
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization." )
logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`." )
if isinstance(lowercase_ , lowercase_ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
"'sequential'." )
UpperCamelCase = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
UpperCamelCase = {}
UpperCamelCase = special_dtypes
UpperCamelCase = no_split_module_classes
UpperCamelCase = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
UpperCamelCase = get_balanced_memory(
lowercase_ , low_zero=(device_map == "balanced_low_0") , max_memory=lowercase_ , **lowercase_ , )
UpperCamelCase = max_memory
UpperCamelCase = infer_auto_device_map(lowercase_ , **lowercase_ )
if isinstance(lowercase_ , lowercase_ ):
# check if don't have any quantized module on the cpu
UpperCamelCase = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
UpperCamelCase = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n " )
else:
logger.info(
"Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit" )
del device_map_without_some_modules
return device_map
def __magic_name__ ( lowercase_ , lowercase_ , lowercase_=None , lowercase_=None ) -> Optional[int]:
'''simple docstring'''
if modules_to_not_convert is None:
UpperCamelCase = []
UpperCamelCase , UpperCamelCase = _replace_with_bnb_layers(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug." )
return model
def __magic_name__ ( lowercase_ , lowercase_ , lowercase_=None , lowercase_=None , ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = False
for name, module in model.named_children():
if current_key_name is None:
UpperCamelCase = []
current_key_name.append(lowercase_ )
if isinstance(lowercase_ , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
UpperCamelCase = ".".join(lowercase_ )
UpperCamelCase = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
UpperCamelCase = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
UpperCamelCase = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=lowercase_ , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
UpperCamelCase = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("load_in_8bit and load_in_4bit can't be both False" )
UpperCamelCase = module.weight.data
if module.bias is not None:
UpperCamelCase = module.bias.data
bnb_module.requires_grad_(lowercase_ )
setattr(lowercase_ , lowercase_ , lowercase_ )
UpperCamelCase = True
if len(list(module.children() ) ) > 0:
UpperCamelCase , UpperCamelCase = _replace_with_bnb_layers(
lowercase_ , lowercase_ , lowercase_ , lowercase_ )
UpperCamelCase = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __magic_name__ ( lowercase_ ) -> Union[str, Any]:
'''simple docstring'''
with init_empty_weights():
UpperCamelCase = deepcopy(lowercase_ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
UpperCamelCase = find_tied_parameters(lowercase_ )
# For compatibility with Accelerate < 0.18
if isinstance(lowercase_ , lowercase_ ):
UpperCamelCase = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
UpperCamelCase = sum(lowercase_ , [] )
UpperCamelCase = len(lowercase_ ) > 0
# Check if it is a base model
UpperCamelCase = False
if hasattr(lowercase_ , "base_model_prefix" ):
UpperCamelCase = not hasattr(lowercase_ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
UpperCamelCase = list(model.named_children() )
UpperCamelCase = [list_modules[-1][0]]
# add last module together with tied weights
UpperCamelCase = set(lowercase_ ) - set(lowercase_ )
UpperCamelCase = list(set(lowercase_ ) ) + list(lowercase_ )
# remove ".weight" from the keys
UpperCamelCase = [".weight", ".bias"]
UpperCamelCase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
UpperCamelCase = name.replace(lowercase_ , "" )
filtered_module_names.append(lowercase_ )
return filtered_module_names
def __magic_name__ ( lowercase_ ) -> Any:
'''simple docstring'''
for m in model.modules():
if isinstance(lowercase_ , bnb.nn.Linearabit ):
return True
return False
def __magic_name__ ( lowercase_ ) -> List[str]:
'''simple docstring'''
return next(parameter.parameters() ).device
def __magic_name__ ( lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> Tuple:
'''simple docstring'''
if fpaa_statistics is None:
set_module_tensor_to_device(lowercase_ , lowercase_ , 0 , dtype=lowercase_ , value=lowercase_ )
UpperCamelCase = param_name
UpperCamelCase = model
if "." in tensor_name:
UpperCamelCase = tensor_name.split("." )
for split in splits[:-1]:
UpperCamelCase = getattr(lowercase_ , lowercase_ )
if new_module is None:
raise ValueError(f'''{module} has no attribute {split}.''' )
UpperCamelCase = new_module
UpperCamelCase = splits[-1]
# offload weights
UpperCamelCase = False
offload_weight(module._parameters[tensor_name] , lowercase_ , lowercase_ , index=lowercase_ )
if hasattr(module._parameters[tensor_name] , "SCB" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB" ) , lowercase_ , index=lowercase_ , )
else:
offload_weight(lowercase_ , lowercase_ , lowercase_ , index=lowercase_ )
offload_weight(lowercase_ , param_name.replace("weight" , "SCB" ) , lowercase_ , index=lowercase_ )
set_module_tensor_to_device(lowercase_ , lowercase_ , "meta" , dtype=lowercase_ , value=torch.empty(*param.size() ) )
| 606 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__a : Tuple = logging.get_logger(__name__)
__a : Optional[int] = {
"""salesforce/blip2-opt-2.7b""": """https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json""",
}
class __UpperCAmelCase ( snake_case__ ):
"""simple docstring"""
lowercase = """blip_2_vision_model"""
def __init__( self , SCREAMING_SNAKE_CASE=1408 , SCREAMING_SNAKE_CASE=6144 , SCREAMING_SNAKE_CASE=39 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=224 , SCREAMING_SNAKE_CASE=14 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.00_001 , SCREAMING_SNAKE_CASE=0.0 , SCREAMING_SNAKE_CASE=1e-10 , SCREAMING_SNAKE_CASE=True , **SCREAMING_SNAKE_CASE , ) -> str:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
UpperCamelCase = hidden_size
UpperCamelCase = intermediate_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = patch_size
UpperCamelCase = image_size
UpperCamelCase = initializer_range
UpperCamelCase = attention_dropout
UpperCamelCase = layer_norm_eps
UpperCamelCase = hidden_act
UpperCamelCase = qkv_bias
@classmethod
def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE )
UpperCamelCase , UpperCamelCase = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get("model_type" ) == "blip-2":
UpperCamelCase = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
class __UpperCAmelCase ( snake_case__ ):
"""simple docstring"""
lowercase = """blip_2_qformer"""
def __init__( self , SCREAMING_SNAKE_CASE=30522 , SCREAMING_SNAKE_CASE=768 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=12 , SCREAMING_SNAKE_CASE=3072 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=1e-12 , SCREAMING_SNAKE_CASE=0 , SCREAMING_SNAKE_CASE="absolute" , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=1408 , **SCREAMING_SNAKE_CASE , ) -> Optional[int]:
"""simple docstring"""
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_act
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = position_embedding_type
UpperCamelCase = cross_attention_frequency
UpperCamelCase = encoder_hidden_size
@classmethod
def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ) -> "PretrainedConfig":
"""simple docstring"""
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE )
UpperCamelCase , UpperCamelCase = cls.get_config_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get("model_type" ) == "blip-2":
UpperCamelCase = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
class __UpperCAmelCase ( snake_case__ ):
"""simple docstring"""
lowercase = """blip-2"""
lowercase = True
def __init__( self , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=None , SCREAMING_SNAKE_CASE=32 , **SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
super().__init__(**SCREAMING_SNAKE_CASE )
if vision_config is None:
UpperCamelCase = {}
logger.info("vision_config is None. initializing the Blip2VisionConfig with default values." )
if qformer_config is None:
UpperCamelCase = {}
logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values." )
if text_config is None:
UpperCamelCase = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
UpperCamelCase = BlipaVisionConfig(**SCREAMING_SNAKE_CASE )
UpperCamelCase = BlipaQFormerConfig(**SCREAMING_SNAKE_CASE )
UpperCamelCase = text_config["model_type"] if "model_type" in text_config else "opt"
UpperCamelCase = CONFIG_MAPPING[text_model_type](**SCREAMING_SNAKE_CASE )
UpperCamelCase = self.text_config.tie_word_embeddings
UpperCamelCase = self.text_config.is_encoder_decoder
UpperCamelCase = num_query_tokens
UpperCamelCase = self.vision_config.hidden_size
UpperCamelCase = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
UpperCamelCase = 1.0
UpperCamelCase = 0.02
@classmethod
def __lowerCAmelCase ( cls , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , ) -> Optional[int]:
"""simple docstring"""
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **SCREAMING_SNAKE_CASE , )
def __lowerCAmelCase ( self ) -> int:
"""simple docstring"""
UpperCamelCase = copy.deepcopy(self.__dict__ )
UpperCamelCase = self.vision_config.to_dict()
UpperCamelCase = self.qformer_config.to_dict()
UpperCamelCase = self.text_config.to_dict()
UpperCamelCase = self.__class__.model_type
return output
| 606 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase):
'''simple docstring'''
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Optional[int] = tempfile.mkdtemp()
UpperCamelCase : List[str] = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
UpperCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
UpperCamelCase : Tuple = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'image_std': [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
UpperCamelCase : str = os.path.join(self.tmpdirname , UpperCamelCase__ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def _lowercase ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def _lowercase ( self , **__SCREAMING_SNAKE_CASE ):
"""simple docstring"""
return EfficientNetImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def _lowercase ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[Any] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCamelCase : Optional[Any] = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Tuple = self.get_tokenizer()
UpperCamelCase : Dict = self.get_rust_tokenizer()
UpperCamelCase : Dict = self.get_image_processor()
UpperCamelCase : Optional[int] = AlignProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
UpperCamelCase : Union[str, Any] = AlignProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase__ )
UpperCamelCase : List[Any] = AlignProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
UpperCamelCase : Union[str, Any] = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase__ )
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , UpperCamelCase__ )
self.assertIsInstance(processor_fast.image_processor , UpperCamelCase__ )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Union[str, Any] = AlignProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase : Optional[Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
UpperCamelCase : int = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 )
UpperCamelCase : List[Any] = AlignProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , UpperCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : int = self.get_image_processor()
UpperCamelCase : str = self.get_tokenizer()
UpperCamelCase : str = AlignProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
UpperCamelCase : Union[str, Any] = self.prepare_image_inputs()
UpperCamelCase : int = image_processor(UpperCamelCase__ , return_tensors='''np''' )
UpperCamelCase : Dict = processor(images=UpperCamelCase__ , return_tensors='''np''' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = self.get_image_processor()
UpperCamelCase : Optional[Any] = self.get_tokenizer()
UpperCamelCase : List[str] = AlignProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
UpperCamelCase : Tuple = 'lower newer'
UpperCamelCase : Tuple = processor(text=UpperCamelCase__ )
UpperCamelCase : int = tokenizer(UpperCamelCase__ , padding='''max_length''' , max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Dict = self.get_image_processor()
UpperCamelCase : List[Any] = self.get_tokenizer()
UpperCamelCase : str = AlignProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
UpperCamelCase : str = 'lower newer'
UpperCamelCase : Optional[int] = self.prepare_image_inputs()
UpperCamelCase : Optional[int] = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : List[str] = self.get_image_processor()
UpperCamelCase : List[str] = self.get_tokenizer()
UpperCamelCase : Union[str, Any] = AlignProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
UpperCamelCase : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase : List[str] = processor.batch_decode(UpperCamelCase__ )
UpperCamelCase : Any = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self ):
"""simple docstring"""
UpperCamelCase : Dict = self.get_image_processor()
UpperCamelCase : int = self.get_tokenizer()
UpperCamelCase : Union[str, Any] = AlignProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
UpperCamelCase : str = 'lower newer'
UpperCamelCase : List[str] = self.prepare_image_inputs()
UpperCamelCase : List[Any] = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 709 |
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 500000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    """Timed wrapper around `Dataset.map` (intentionally shadows the builtin, matching the calls below)."""
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    """Timed wrapper around `Dataset.filter`."""
    _ = dataset.filter(**kwargs)
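
# `get_duration` (imported from the local `utils` module) is assumed here to be a decorator that returns the
# wall-clock duration of the wrapped call; those durations are what `benchmark_map_filter` stores in `times`.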
def benchmark_map_filter():
    """Benchmark `Dataset.map` / `Dataset.filter` in several output formats and dump the timings to JSON."""
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )
        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)
        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
| 643 | 0 |
def hexagonal_numbers(length: int):
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]
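
# The n-th hexagonal number is h_n = n * (2n - 1), so hexagonal_numbers(5) yields [0, 1, 6, 15, 28].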
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 503 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class PlotArguments:
    """Arguments for plotting a benchmark csv file."""

    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(value):
    try:
        int(value)
        return True
    except ValueError:
        return False


def can_convert_to_float(value):
    try:
        float(value)
        return True
    except ValueError:
        return False
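
# For instance, can_convert_to_int("32") and can_convert_to_float("0.75") both return True,
# while both return False for a placeholder string such as "N/A".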
class Plot:
    """Collects the csv results per model and renders them with matplotlib."""

    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})

        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])
    def plot(self):
        _fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale("log")
ax.set_yscale("log")
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter())
for model_name_idx, model_name in enumerate(self.result_dict.keys()):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]

            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )

            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )

                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )

                x_axis_array = np.asarray(x_axis_array, dtype=int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")
title_str += F" {label_model_name} vs."
        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"

        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file)
else:
plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=args)
    plot.plot()
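
# Illustrative invocation (file names are placeholders; HfArgumentParser derives the flags from the
# PlotArguments fields above):
#   python plot_csv_file.py --csv_file results.csv --is_time --figure_png_file plot.png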
if __name__ == "__main__":
main()
| 503 | 1 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
UpperCamelCase__ : str = logging.getLogger(__name__)
require_version("pytorch_lightning>=1.0.4")
UpperCamelCase__ : List[Any] = {
"base": AutoModel,
"sequence-classification": AutoModelForSequenceClassification,
"question-answering": AutoModelForQuestionAnswering,
"pretraining": AutoModelForPreTraining,
"token-classification": AutoModelForTokenClassification,
"language-modeling": AutoModelWithLMHead,
"summarization": AutoModelForSeqaSeqLM,
"translation": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
UpperCamelCase__ : str = {
"linear": get_linear_schedule_with_warmup,
"cosine": get_cosine_schedule_with_warmup,
"cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup,
"polynomial": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
UpperCamelCase__ : Any = sorted(arg_to_scheduler.keys())
UpperCamelCase__ : str = "{" + ", ".join(arg_to_scheduler_choices) + "}"
class __snake_case ( pl.LightningModule ):
def __init__( self , _A , _A=None , _A="base" , _A=None , _A=None , _A=None , **_A , ):
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(_A)
SCREAMING_SNAKE_CASE_ = 0
SCREAMING_SNAKE_CASE_ = Path(self.hparams.output_dir)
SCREAMING_SNAKE_CASE_ = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
SCREAMING_SNAKE_CASE_ = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=_A , **_A , )
else:
SCREAMING_SNAKE_CASE_ = config
SCREAMING_SNAKE_CASE_ = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , _A , _A):
assert hasattr(self.config , _A), f"""model config doesn't have a `{p}` attribute"""
setattr(self.config , _A , getattr(self.hparams , _A))
if tokenizer is None:
SCREAMING_SNAKE_CASE_ = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_A , )
else:
SCREAMING_SNAKE_CASE_ = tokenizer
SCREAMING_SNAKE_CASE_ = MODEL_MODES[mode]
if model is None:
SCREAMING_SNAKE_CASE_ = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path) , config=self.config , cache_dir=_A , )
else:
SCREAMING_SNAKE_CASE_ = model
def lowerCAmelCase__ ( self , *_A , **_A):
SCREAMING_SNAKE_CASE_ = self.model_type.from_pretrained(*_A , **_A)
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = arg_to_scheduler[self.hparams.lr_scheduler]
SCREAMING_SNAKE_CASE_ = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps())
SCREAMING_SNAKE_CASE_ = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = self.model
SCREAMING_SNAKE_CASE_ = ['bias', 'LayerNorm.weight']
SCREAMING_SNAKE_CASE_ = [
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
], # check this named paramters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
SCREAMING_SNAKE_CASE_ = Adafactor(
_A , lr=self.hparams.learning_rate , scale_parameter=_A , relative_step=_A)
else:
SCREAMING_SNAKE_CASE_ = AdamW(
_A , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon)
SCREAMING_SNAKE_CASE_ = optimizer
SCREAMING_SNAKE_CASE_ = self.get_lr_scheduler()
return [optimizer], [scheduler]
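
        # Lightning accepts this pair of lists as (optimizers, schedulers); the scheduler dict built by the
        # method above uses {"interval": "step"}, so the learning rate is updated after every optimization step.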
def lowerCAmelCase__ ( self , _A , _A):
return self.validation_step(_A , _A)
def lowerCAmelCase__ ( self , _A):
return self.validation_end(_A)
def lowerCAmelCase__ ( self):
SCREAMING_SNAKE_CASE_ = max(1 , self.hparams.gpus) # TODO: consider num_tpu_cores
SCREAMING_SNAKE_CASE_ = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def lowerCAmelCase__ ( self , _A):
if stage == "test":
SCREAMING_SNAKE_CASE_ = len(self.test_dataloader().dataset)
else:
SCREAMING_SNAKE_CASE_ = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=_A)
SCREAMING_SNAKE_CASE_ = len(self.train_dataloader().dataset)
def lowerCAmelCase__ ( self , _A , _A , _A = False):
raise NotImplementedError('You must implement this for your task')
def lowerCAmelCase__ ( self):
return self.train_loader
def lowerCAmelCase__ ( self):
return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=_A)
def lowerCAmelCase__ ( self):
return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=_A)
def lowerCAmelCase__ ( self , _A):
return os.path.join(
self.hparams.data_dir , 'cached_{}_{}_{}'.format(
_A , list(filter(_A , self.hparams.model_name_or_path.split('/'))).pop() , str(self.hparams.max_seq_length) , ) , )
@pl.utilities.rank_zero_only
def lowerCAmelCase__ ( self , _A):
SCREAMING_SNAKE_CASE_ = self.output_dir.joinpath('best_tfmr')
SCREAMING_SNAKE_CASE_ = self.step_count
self.model.save_pretrained(_A)
self.tokenizer.save_pretrained(_A)
@staticmethod
def lowerCAmelCase__ ( _A , _A):
parser.add_argument(
'--model_name_or_path' , default=_A , type=_A , required=_A , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--config_name' , default='' , type=_A , help='Pretrained config name or path if not the same as model_name')
parser.add_argument(
'--tokenizer_name' , default=_A , type=_A , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument(
'--cache_dir' , default=str(Path(_A).parent / 'test_run' / 'cache') , type=_A , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
parser.add_argument(
'--encoder_layerdrop' , type=_A , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--decoder_layerdrop' , type=_A , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--dropout' , type=_A , help='Dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--attention_dropout' , type=_A , help='Attention dropout probability (Optional). Goes into model.config' , )
parser.add_argument('--learning_rate' , default=5E-5 , type=_A , help='The initial learning rate for Adam.')
parser.add_argument(
'--lr_scheduler' , default='linear' , choices=_A , metavar=_A , type=_A , help='Learning rate scheduler' , )
parser.add_argument('--weight_decay' , default=0.0 , type=_A , help='Weight decay if we apply some.')
parser.add_argument('--adam_epsilon' , default=1E-8 , type=_A , help='Epsilon for Adam optimizer.')
parser.add_argument('--warmup_steps' , default=0 , type=_A , help='Linear warmup over warmup_steps.')
parser.add_argument('--num_workers' , default=4 , type=_A , help='kwarg passed to DataLoader')
parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=_A)
parser.add_argument('--train_batch_size' , default=32 , type=_A)
parser.add_argument('--eval_batch_size' , default=32 , type=_A)
parser.add_argument('--adafactor' , action='store_true')
class __snake_case ( pl.Callback ):
def lowerCAmelCase__ ( self , _A , _A):
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class __snake_case ( pl.Callback ):
def lowerCAmelCase__ ( self , _A , _A):
# print(pl_module.model.rag)
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(_A)
class __snake_case ( pl.Callback ):
def lowerCAmelCase__ ( self , _A , _A):
SCREAMING_SNAKE_CASE_ = trainer.lr_schedulers[0]['scheduler']
SCREAMING_SNAKE_CASE_ = {f"""lr_group_{i}""": lr for i, lr in enumerate(lr_scheduler.get_lr())}
pl_module.logger.log_metrics(_A)
def lowerCAmelCase__ ( self , _A , _A):
rank_zero_info('***** Validation results *****')
SCREAMING_SNAKE_CASE_ = trainer.callback_metrics
# Log results
for key in sorted(_A):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(_A , str(metrics[key])))
def lowerCAmelCase__ ( self , _A , _A):
rank_zero_info('***** Test results *****')
SCREAMING_SNAKE_CASE_ = trainer.callback_metrics
# Log and save results to file
SCREAMING_SNAKE_CASE_ = os.path.join(pl_module.hparams.output_dir , 'test_results.txt')
with open(_A , 'w') as writer:
for key in sorted(_A):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(_A , str(metrics[key])))
writer.write('{} = {}\n'.format(_A , str(metrics[key])))
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
parser.add_argument(
'--output_dir' , default=str(Path(_SCREAMING_SNAKE_CASE ).parent / 'test_run' / 'model_checkpoints' ) , type=_SCREAMING_SNAKE_CASE , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=_SCREAMING_SNAKE_CASE , default='O2' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=_SCREAMING_SNAKE_CASE )
parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=_SCREAMING_SNAKE_CASE , help='Max gradient norm' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' )
parser.add_argument(
'--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=_SCREAMING_SNAKE_CASE , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--seed' , type=_SCREAMING_SNAKE_CASE , default=42 , help='random seed for initialization' )
parser.add_argument(
'--data_dir' , default=str(Path(_SCREAMING_SNAKE_CASE ).parent / 'test_run' / 'dummy-train-data' ) , type=_SCREAMING_SNAKE_CASE , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , )
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : BaseTransformer , _SCREAMING_SNAKE_CASE : argparse.Namespace , _SCREAMING_SNAKE_CASE : Any=None , _SCREAMING_SNAKE_CASE : Dict=True , _SCREAMING_SNAKE_CASE : Optional[int]=[] , _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Optional[Any]=None , **_SCREAMING_SNAKE_CASE : Union[str, Any] , ):
"""simple docstring"""
pl.seed_everything(args.seed )
# init model
SCREAMING_SNAKE_CASE_ = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
# add custom checkpoints
if checkpoint_callback is None:
SCREAMING_SNAKE_CASE_ = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(_SCREAMING_SNAKE_CASE )
if logging_callback is None:
SCREAMING_SNAKE_CASE_ = LoggingCallback()
SCREAMING_SNAKE_CASE_ = {}
if args.fpaa:
SCREAMING_SNAKE_CASE_ = 16
if args.gpus > 1:
SCREAMING_SNAKE_CASE_ = 'auto'
SCREAMING_SNAKE_CASE_ = 'ddp'
SCREAMING_SNAKE_CASE_ = args.accumulate_grad_batches
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = 'auto'
SCREAMING_SNAKE_CASE_ = pl.Trainer.from_argparse_args(
_SCREAMING_SNAKE_CASE , weights_summary=_SCREAMING_SNAKE_CASE , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=_SCREAMING_SNAKE_CASE , val_check_interval=1 , num_sanity_val_steps=2 , **_SCREAMING_SNAKE_CASE , )
if args.do_train:
trainer.fit(_SCREAMING_SNAKE_CASE )
else:
print('RAG modeling tests with new set functions successfuly executed!' )
return trainer
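
# The two module-level helpers above are the glue used by task scripts: the first registers generic trainer
# flags (output dir, fp16, gradient accumulation, seed, data dir) on an argparse parser, and the second seeds
# everything, assembles the checkpoint/logging callbacks defined above and builds a pl.Trainer that optionally
# runs `trainer.fit(model)`.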
| 620 |
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()
def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    """Run one element's compare-exchange loop and send its final value back through result_pipe."""
global process_lock
# we perform n swaps since after n swaps we know we are sorted
# we *could* stop early if we are sorted already, but it takes as long to
# find out we are sorted as it does to sort the list with this algorithm
for i in range(0 , 10 ):
if (i + position) % 2 == 0 and r_send is not None:
# send your value to your right neighbor
process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()
            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()
            # take the lower value since you are on the left
            value = min(value, temp)
elif (i + position) % 2 != 0 and l_send is not None:
# send your value to your left neighbor
process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()
            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()
            # take the higher value since you are on the right
            value = max(value, temp)
# after all swaps are performed, send the values back to main
    result_pipe[1].send(value)
def odd_even_transposition(arr):
    """Sort `arr` with one process per element performing odd-even transposition."""
    process_array_ = []
    result_pipe = []
# initialize the list of pipes where the values will be retrieved
for _ in arr:
result_pipe.append(Pipe() )
# creates the processes
# the first and last process only have one neighbor so they are made outside
# of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process, args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]), ) )
    temp_lr = temp_rs
    temp_ls = temp_rr
    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process, args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]), ) )
        temp_lr = temp_rs
        temp_ls = temp_rr
    process_array_.append(
        Process(
            target=oe_process, args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ), ) )
# start the processes
for p in process_array_:
p.start()
# wait for the processes to end and write their values to the list
    for p in range(0, len(arr)):
        arr[p] = result_pipe[p][0].recv()
process_array_[p].join()
return arr
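
# main() below sorts a 10-element list, so the fixed 10 compare-exchange rounds in oe_process are exactly
# enough: odd-even transposition sort needs n rounds to sort a list of length n.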
def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)
if __name__ == "__main__":
main()
| 620 | 1 |
'''simple docstring'''
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class _SCREAMING_SNAKE_CASE ( UpperCamelCase , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = RoFormerTokenizer
SCREAMING_SNAKE_CASE_ = RoFormerTokenizerFast
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
def _lowerCamelCase ( self ):
"""simple docstring"""
super().setUp()
def _lowerCamelCase ( self , **_snake_case ):
"""simple docstring"""
return self.tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **_snake_case )
def _lowerCamelCase ( self , **_snake_case ):
"""simple docstring"""
return self.rust_tokenizer_class.from_pretrained('''junnyu/roformer_chinese_base''' , **_snake_case )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = '''永和服装饰品有限公司,今天天气非常好'''
__lowerCamelCase = '''永和 服装 饰品 有限公司 , 今 天 天 气 非常 好'''
return input_text, output_text
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase , __lowerCamelCase = self.get_chinese_input_output_texts()
__lowerCamelCase = tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case , output_text.split() )
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , _snake_case )
def _lowerCamelCase ( self ):
"""simple docstring"""
__lowerCamelCase = self.get_rust_tokenizer()
__lowerCamelCase , __lowerCamelCase = self.get_chinese_input_output_texts()
__lowerCamelCase = tokenizer.tokenize(_snake_case )
self.assertListEqual(_snake_case , output_text.split() )
__lowerCamelCase = tokens + [tokenizer.unk_token]
__lowerCamelCase = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_snake_case ) , _snake_case )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
| 316 |
'''simple docstring'''
def binomial_coefficient(n, k):
    result = 1  # to keep the calculated value
    # Since C(n, k) = C(n, n - k)
    if k > (n - k):
        k = n - k
    # Calculate C(n, k)
    for i in range(k):
        result *= n - i
        result //= i + 1
    return result


def catalan_number(node_count):
    return binomial_coefficient(2 * node_count, node_count) // (node_count + 1)


def factorial(n):
    if n < 0:
        raise ValueError("factorial() not defined for negative values")
    result = 1
    for i in range(1, n + 1):
        result *= i
    return result


def binary_tree_count(node_count):
    return catalan_number(node_count) * factorial(node_count)
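
# Worked example: for 5 nodes, catalan_number(5) = C(10, 5) // 6 = 252 // 6 = 42 distinct binary search
# trees, and binary_tree_count(5) = 42 * 5! = 42 * 120 = 5040 binary trees built from 5 labelled nodes.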
if __name__ == "__main__":
    node_count = int(input("Enter the number of nodes: ").strip() or 0)
if node_count <= 0:
raise ValueError("We need some nodes to work with.")
print(
f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
f'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
| 316 | 1 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    """Tracks the peak resident memory of the current process on a background thread."""

    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak
cpu_peak_tracker = PeakCPUMemory()
def start_measure():
    # time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures
def end_measure(start_measures):
    # time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures
def log_measures(measures, description):
'''simple docstring'''
print(F"""{description}:""" )
print(F"""- Time: {measures['time']:.2f}s""" )
for i in range(torch.cuda.device_count() ):
print(F"""- GPU {i} allocated: {measures[str(_UpperCAmelCase )]:.2f}MiB""" )
snake_case__ : Union[str, Any] = measures[F"""{i}-peak"""]
print(F"""- GPU {i} peak: {peak:.2f}MiB""" )
print(F"""- CPU RAM allocated: {measures['cpu']:.2f}MiB""" )
print(F"""- CPU RAM peak: {measures['cpu-peak']:.2f}MiB""" )
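
# Illustrative usage of the helpers above (the profiled workload is a placeholder):
#   start_measures = start_measure()
#   run_workload()                        # whatever should be profiled
#   measures = end_measure(start_measures)
#   log_measures(measures, "workload")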
| 150 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class SCREAMING_SNAKE_CASE_ ( _lowercase):
'''simple docstring'''
def UpperCAmelCase ( self) -> Tuple:
'''simple docstring'''
snake_case__ : List[Any] = tempfile.mkdtemp()
snake_case__ : Tuple = 8
# DPR tok
snake_case__ : List[str] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
snake_case__ : List[Any] = os.path.join(self.tmpdirname , "dpr_tokenizer")
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__)
snake_case__ : Optional[int] = os.path.join(lowerCamelCase__ , DPR_VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
snake_case__ : Optional[int] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
snake_case__ : str = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__))))
snake_case__ : Tuple = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
snake_case__ : List[Any] = {"unk_token": "<unk>"}
snake_case__ : Tuple = os.path.join(self.tmpdirname , "bart_tokenizer")
os.makedirs(lowerCamelCase__ , exist_ok=lowerCamelCase__)
snake_case__ : Any = os.path.join(lowerCamelCase__ , BART_VOCAB_FILES_NAMES["vocab_file"])
snake_case__ : Optional[Any] = os.path.join(lowerCamelCase__ , BART_VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as fp:
fp.write(json.dumps(lowerCamelCase__) + "\n")
with open(self.merges_file , "w" , encoding="utf-8") as fp:
fp.write("\n".join(lowerCamelCase__))
def UpperCAmelCase ( self) -> DPRQuestionEncoderTokenizer:
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer"))
def UpperCAmelCase ( self) -> BartTokenizer:
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer"))
def UpperCAmelCase ( self) -> List[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname)
@require_tokenizers
def UpperCAmelCase ( self) -> Any:
'''simple docstring'''
snake_case__ : Any = os.path.join(self.tmpdirname , "rag_tokenizer")
snake_case__ : Any = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict())
snake_case__ : Dict = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer())
rag_config.save_pretrained(lowerCamelCase__)
rag_tokenizer.save_pretrained(lowerCamelCase__)
snake_case__ : Optional[int] = RagTokenizer.from_pretrained(lowerCamelCase__ , config=lowerCamelCase__)
self.assertIsInstance(new_rag_tokenizer.question_encoder , lowerCamelCase__)
self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab())
self.assertIsInstance(new_rag_tokenizer.generator , lowerCamelCase__)
self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab())
@slow
def UpperCAmelCase ( self) -> Dict:
'''simple docstring'''
snake_case__ : Optional[Any] = RagTokenizer.from_pretrained("facebook/rag-token-nq")
snake_case__ : List[str] = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
snake_case__ : Union[str, Any] = tokenizer(lowerCamelCase__)
self.assertIsNotNone(lowerCamelCase__)
@slow
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
snake_case__ : Any = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
snake_case__ : Tuple = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
snake_case__ : Optional[Any] = tokenizer(lowerCamelCase__)
self.assertIsNotNone(lowerCamelCase__)
| 150 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a__ ( lowerCamelCase_ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Optional[int] = GPTSanJapaneseTokenizer
_SCREAMING_SNAKE_CASE : Optional[Any] = False
_SCREAMING_SNAKE_CASE : Optional[Any] = {'do_clean_text': False, 'add_prefix_space': False}
def _lowerCamelCase ( self ):
"""simple docstring"""
super().setUp()
# fmt: off
_lowercase : List[str] = ["こん", "こんに", "にちは", "ばんは", "世界,㔺界", "、", "。", "<BR>", "<SP>", "<TAB>", "<URL>", "<EMAIL>", "<TEL>", "<DATE>", "<PRICE>", "<BLOCK>", "<KIGOU>", "<U2000U2BFF>", "<|emoji1|>", "<unk>", "<|bagoftoken|>", "<|endoftext|>"]
# fmt: on
_lowercase : Union[str, Any] = {"emoji": {"\ud83d\ude00": "<|emoji1|>"}, "emoji_inv": {"<|emoji1|>": "\ud83d\ude00"}} # 😀
_lowercase : List[Any] = {"unk_token": "<unk>"}
_lowercase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
_lowercase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["emoji_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
with open(self.emoji_file , "w" ) as emoji_writer:
emoji_writer.write(json.dumps(_UpperCamelCase ) )
def _lowerCamelCase ( self , **_UpperCamelCase ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **_UpperCamelCase )
def _lowerCamelCase ( self , _UpperCamelCase ):
"""simple docstring"""
_lowercase : int = "こんにちは、世界。 \nこんばんは、㔺界。😀"
_lowercase : str = "こんにちは、世界。 \nこんばんは、世界。😀"
return input_text, output_text
def _lowerCamelCase ( self , _UpperCamelCase ):
"""simple docstring"""
_lowercase , _lowercase : int = self.get_input_output_texts(_UpperCamelCase )
_lowercase : Tuple = tokenizer.encode(_UpperCamelCase , add_special_tokens=_UpperCamelCase )
_lowercase : List[Any] = tokenizer.decode(_UpperCamelCase , clean_up_tokenization_spaces=_UpperCamelCase )
return text, ids
def _lowerCamelCase ( self ):
"""simple docstring"""
pass # TODO add if relevant
def _lowerCamelCase ( self ):
"""simple docstring"""
pass # TODO add if relevant
def _lowerCamelCase ( self ):
"""simple docstring"""
pass # TODO add if relevant
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = self.get_tokenizer()
# Testing tokenization
_lowercase : Optional[int] = "こんにちは、世界。 こんばんは、㔺界。"
_lowercase : Tuple = ["こん", "にちは", "、", "世界", "。", "<SP>", "こん", "ばんは", "、", "㔺界", "。"]
_lowercase : str = tokenizer.tokenize(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
# Testing conversion to ids without special tokens
_lowercase : str = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
_lowercase : int = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
# Testing conversion to ids with special tokens
_lowercase : Dict = tokens + [tokenizer.unk_token]
_lowercase : Optional[Any] = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
_lowercase : int = tokenizer.convert_tokens_to_ids(_UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Dict = self.get_tokenizer()
# Testing tokenization
_lowercase : str = "こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。"
_lowercase : List[str] = "こんにちは、、、、世界。こんばんは、、、、世界。"
_lowercase : str = tokenizer.encode(_UpperCamelCase )
_lowercase : Tuple = tokenizer.decode(_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : int = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
_lowercase : Any = "こんにちは、世界。"
_lowercase : Union[str, Any] = "こんばんは、㔺界。😀"
_lowercase : List[str] = "こんにちは、世界。こんばんは、世界。😀"
_lowercase : Dict = tokenizer.encode(prefix_text + input_text )
_lowercase : List[str] = tokenizer.encode("" , prefix_text=prefix_text + input_text )
_lowercase : Tuple = tokenizer.encode(_UpperCamelCase , prefix_text=_UpperCamelCase )
_lowercase : int = tokenizer.decode(_UpperCamelCase )
_lowercase : Tuple = tokenizer.decode(_UpperCamelCase )
_lowercase : List[str] = tokenizer.decode(_UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(_UpperCamelCase , _UpperCamelCase )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[str] = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
# Testing tokenization
_lowercase : str = "こんにちは、世界。"
_lowercase : Tuple = "こんばんは、㔺界。😀"
_lowercase : Tuple = len(tokenizer.encode(_UpperCamelCase ) ) - 2
_lowercase : Dict = len(tokenizer.encode(_UpperCamelCase ) ) - 2
_lowercase : Union[str, Any] = [1] + [0] * (len_prefix + len_text + 1)
_lowercase : Optional[int] = [1] * (len_prefix + len_text + 1) + [0]
_lowercase : str = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
_lowercase : Dict = tokenizer(prefix_text + input_text ).token_type_ids
_lowercase : int = tokenizer("" , prefix_text=prefix_text + input_text ).token_type_ids
_lowercase : Any = tokenizer(_UpperCamelCase , prefix_text=_UpperCamelCase ).token_type_ids
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
self.assertListEqual(_UpperCamelCase , _UpperCamelCase )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : str = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
_lowercase : Union[str, Any] = tokenizer.encode("あンいワ" )
_lowercase : str = tokenizer.encode("" , prefix_text="あンいワ" )
_lowercase : Optional[int] = tokenizer.encode("いワ" , prefix_text="あン" )
self.assertEqual(tokenizer.decode(_UpperCamelCase ) , tokenizer.decode(_UpperCamelCase ) )
self.assertEqual(tokenizer.decode(_UpperCamelCase ) , tokenizer.decode(_UpperCamelCase ) )
self.assertNotEqual(_UpperCamelCase , _UpperCamelCase )
self.assertNotEqual(_UpperCamelCase , _UpperCamelCase )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[Any] = self.tokenizer_class.from_pretrained("Tanrei/GPTSAN-japanese" )
_lowercase : List[Any] = [["武田信玄", "は、"], ["織田信長", "の配下の、"]]
_lowercase : Optional[Any] = tokenizer(_UpperCamelCase , padding=_UpperCamelCase )
_lowercase : int = tokenizer.batch_encode_plus(_UpperCamelCase , padding=_UpperCamelCase )
# fmt: off
_lowercase : str = [[35993, 8640, 25948, 35998, 30647, 35675, 35999, 35999], [35993, 10382, 9868, 35998, 30646, 9459, 30646, 35675]]
_lowercase : Optional[Any] = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
_lowercase : Union[str, Any] = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , _UpperCamelCase )
self.assertListEqual(x_token.token_type_ids , _UpperCamelCase )
self.assertListEqual(x_token.attention_mask , _UpperCamelCase )
self.assertListEqual(x_token_a.input_ids , _UpperCamelCase )
self.assertListEqual(x_token_a.token_type_ids , _UpperCamelCase )
self.assertListEqual(x_token_a.attention_mask , _UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
def _lowerCamelCase ( self ):
"""simple docstring"""
pass
| 245 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class a__ :
def __init__( self , _UpperCamelCase , _UpperCamelCase=2 , _UpperCamelCase=8 , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=99 , _UpperCamelCase=16 , _UpperCamelCase=5 , _UpperCamelCase=2 , _UpperCamelCase=36 , _UpperCamelCase="gelu" , _UpperCamelCase=0.0 , _UpperCamelCase=0.0 , _UpperCamelCase=512 , _UpperCamelCase=16 , _UpperCamelCase=2 , _UpperCamelCase=0.0_2 , _UpperCamelCase=3 , _UpperCamelCase=4 , _UpperCamelCase=None , ):
"""simple docstring"""
_lowercase : Tuple = parent
_lowercase : Optional[int] = batch_size
_lowercase : Optional[Any] = seq_length
_lowercase : Optional[Any] = is_training
_lowercase : List[str] = use_input_mask
_lowercase : List[Any] = use_token_type_ids
_lowercase : List[str] = use_labels
_lowercase : str = vocab_size
_lowercase : str = hidden_size
_lowercase : Optional[int] = num_hidden_layers
_lowercase : Optional[int] = num_attention_heads
_lowercase : int = intermediate_size
_lowercase : str = hidden_act
_lowercase : Any = hidden_dropout_prob
_lowercase : Union[str, Any] = attention_probs_dropout_prob
_lowercase : Dict = max_position_embeddings
_lowercase : List[str] = type_vocab_size
_lowercase : str = type_sequence_label_size
_lowercase : List[str] = initializer_range
_lowercase : Optional[Any] = num_labels
_lowercase : List[str] = num_choices
_lowercase : Optional[int] = scope
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowercase : Any = None
if self.use_input_mask:
_lowercase : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
_lowercase : List[Any] = None
if self.use_token_type_ids:
_lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowercase : List[Any] = None
_lowercase : Dict = None
_lowercase : int = None
if self.use_labels:
_lowercase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowercase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
_lowercase : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self ):
"""simple docstring"""
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCamelCase , initializer_range=self.initializer_range , )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Optional[int] = self.get_config()
_lowercase : Any = 300
return config
def _lowerCamelCase ( self ):
"""simple docstring"""
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) : Tuple = self.prepare_config_and_inputs()
_lowercase : List[str] = True
_lowercase : List[str] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
_lowercase : Dict = MraModel(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_lowercase : Optional[Any] = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
_lowercase : Tuple = model(_UpperCamelCase , token_type_ids=_UpperCamelCase )
_lowercase : Optional[int] = model(_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
"""simple docstring"""
_lowercase : Any = True
_lowercase : Dict = MraModel(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_lowercase : str = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , encoder_attention_mask=_UpperCamelCase , )
_lowercase : Tuple = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , encoder_hidden_states=_UpperCamelCase , )
_lowercase : Optional[Any] = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
_lowercase : Union[str, Any] = MraForMaskedLM(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_lowercase : Tuple = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
_lowercase : Dict = MraForQuestionAnswering(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_lowercase : Optional[int] = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , start_positions=_UpperCamelCase , end_positions=_UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
_lowercase : Union[str, Any] = self.num_labels
_lowercase : Tuple = MraForSequenceClassification(_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_lowercase : List[str] = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
_lowercase : List[str] = self.num_labels
_lowercase : List[Any] = MraForTokenClassification(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_lowercase : str = model(_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
_lowercase : str = self.num_choices
_lowercase : Dict = MraForMultipleChoice(config=_UpperCamelCase )
model.to(_UpperCamelCase )
model.eval()
_lowercase : Union[str, Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowercase : str = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowercase : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowercase : Optional[int] = model(
_UpperCamelCase , attention_mask=_UpperCamelCase , token_type_ids=_UpperCamelCase , labels=_UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : int = self.prepare_config_and_inputs()
(
(
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) , (
_lowercase
) ,
) : Union[str, Any] = config_and_inputs
_lowercase : int = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class a__ ( lowerCamelCase_ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE : Union[str, Any] = False
_SCREAMING_SNAKE_CASE : Union[str, Any] = False
_SCREAMING_SNAKE_CASE : List[str] = False
_SCREAMING_SNAKE_CASE : Optional[int] = False
_SCREAMING_SNAKE_CASE : List[Any] = ()
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Dict = MraModelTester(self )
_lowercase : Any = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37 )
def _lowerCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_lowercase : Any = type
self.model_tester.create_and_check_model(*_UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCamelCase )
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCamelCase )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase : Optional[Any] = MraModel.from_pretrained(_UpperCamelCase )
self.assertIsNotNone(_UpperCamelCase )
@unittest.skip(reason="MRA does not output attentions" )
def _lowerCamelCase ( self ):
"""simple docstring"""
return
@require_torch
class a__ ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Optional[Any] = MraModel.from_pretrained("uw-madison/mra-base-512-4" )
_lowercase : List[str] = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
_lowercase : Dict = model(_UpperCamelCase )[0]
_lowercase : List[Any] = torch.Size((1, 256, 768) )
self.assertEqual(output.shape , _UpperCamelCase )
_lowercase : int = torch.tensor(
[[[-0.0_1_4_0, 0.0_8_3_0, -0.0_3_8_1], [0.1_5_4_6, 0.1_4_0_2, 0.0_2_2_0], [0.1_1_6_2, 0.0_8_5_1, 0.0_1_6_5]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1E-4 ) )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Dict = MraForMaskedLM.from_pretrained("uw-madison/mra-base-512-4" )
_lowercase : Dict = torch.arange(256 ).unsqueeze(0 )
with torch.no_grad():
_lowercase : Optional[Any] = model(_UpperCamelCase )[0]
_lowercase : Tuple = 50265
_lowercase : Dict = torch.Size((1, 256, vocab_size) )
self.assertEqual(output.shape , _UpperCamelCase )
_lowercase : List[Any] = torch.tensor(
[[[9.2_5_9_5, -3.6_0_3_8, 1_1.8_8_1_9], [9.3_8_6_9, -3.2_6_9_3, 1_1.0_9_5_6], [1_1.8_5_2_4, -3.4_9_3_8, 1_3.1_2_1_0]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1E-4 ) )
@slow
def _lowerCamelCase ( self ):
"""simple docstring"""
_lowercase : Optional[Any] = MraForMaskedLM.from_pretrained("uw-madison/mra-base-4096-8-d3" )
_lowercase : List[Any] = torch.arange(4096 ).unsqueeze(0 )
with torch.no_grad():
_lowercase : Union[str, Any] = model(_UpperCamelCase )[0]
_lowercase : List[str] = 50265
_lowercase : Dict = torch.Size((1, 4096, vocab_size) )
self.assertEqual(output.shape , _UpperCamelCase )
_lowercase : Union[str, Any] = torch.tensor(
[[[5.4_7_8_9, -2.3_5_6_4, 7.5_0_6_4], [7.9_0_6_7, -1.3_3_6_9, 9.9_6_6_8], [9.0_7_1_2, -1.8_1_0_6, 7.0_3_8_0]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _UpperCamelCase , atol=1E-4 ) )
| 245 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase_ ( self : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
UpperCamelCase = get_activation("""gelu""" )
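        # gelu_python should match torch's built-in gelu (within tolerance), while
        # the gelu_new approximation is expected to differ slightly.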
self.assertTrue(torch.allclose(gelu_python(UpperCAmelCase__ ) , torch_builtin(UpperCAmelCase__ ) ) )
self.assertFalse(torch.allclose(gelu_python(UpperCAmelCase__ ) , gelu_new(UpperCAmelCase__ ) ) )
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
UpperCamelCase = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
UpperCamelCase = get_activation("""gelu""" )
UpperCamelCase = get_activation("""gelu_10""" )
UpperCamelCase = torch_builtin(UpperCAmelCase__ )
UpperCamelCase = geluaa(UpperCAmelCase__ )
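        # gelu_10 behaves like gelu but clips its output at 10, so both activations
        # should agree wherever the clipped value stays below that threshold.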
UpperCamelCase = torch.where(y_gelu_aa < 10.0 , 1 , 0 )
self.assertTrue(torch.max(UpperCAmelCase__ ).item() == 10.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
get_activation("""gelu""" )
get_activation("""gelu_10""" )
get_activation("""gelu_fast""" )
get_activation("""gelu_new""" )
get_activation("""gelu_python""" )
get_activation("""gelu_pytorch_tanh""" )
get_activation("""linear""" )
get_activation("""mish""" )
get_activation("""quick_gelu""" )
get_activation("""relu""" )
get_activation("""sigmoid""" )
get_activation("""silu""" )
get_activation("""swish""" )
get_activation("""tanh""" )
with self.assertRaises(UpperCAmelCase__ ):
get_activation("""bogus""" )
with self.assertRaises(UpperCAmelCase__ ):
get_activation(UpperCAmelCase__ )
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
UpperCamelCase = get_activation("""gelu""" )
UpperCamelCase = 1
UpperCamelCase = get_activation("""gelu""" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(UpperCAmelCase__ ):
UpperCamelCase = acta.a
| 718 |
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase ( __snake_case ):
lowercase = ["""image_processor""", """tokenizer"""]
lowercase = """FlavaImageProcessor"""
lowercase = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : Optional[int] , __magic_name__ : Optional[int]=None , __magic_name__ : str=None , **__magic_name__ : Any ):
"""simple docstring"""
UpperCamelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __magic_name__ , )
UpperCamelCase = kwargs.pop("""feature_extractor""" )
UpperCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__magic_name__ , __magic_name__ )
UpperCamelCase = self.image_processor
def __call__( self : int , __magic_name__ : Optional[ImageInput] = None , __magic_name__ : Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None , __magic_name__ : bool = True , __magic_name__ : Union[bool, str, PaddingStrategy] = False , __magic_name__ : Union[bool, str, TruncationStrategy] = False , __magic_name__ : Optional[int] = None , __magic_name__ : int = 0 , __magic_name__ : Optional[int] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : Optional[bool] = None , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = False , __magic_name__ : bool = True , __magic_name__ : Optional[Union[str, TensorType]] = None , **__magic_name__ : str , ):
"""simple docstring"""
if text is None and images is None:
raise ValueError("""You have to specify either text or images. Both cannot be none.""" )
if text is not None:
UpperCamelCase = self.tokenizer(
text=__magic_name__ , add_special_tokens=__magic_name__ , padding=__magic_name__ , truncation=__magic_name__ , max_length=__magic_name__ , stride=__magic_name__ , pad_to_multiple_of=__magic_name__ , return_token_type_ids=__magic_name__ , return_attention_mask=__magic_name__ , return_overflowing_tokens=__magic_name__ , return_special_tokens_mask=__magic_name__ , return_offsets_mapping=__magic_name__ , return_length=__magic_name__ , verbose=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , )
if images is not None:
UpperCamelCase = self.image_processor(
__magic_name__ , return_image_mask=__magic_name__ , return_codebook_pixels=__magic_name__ , return_tensors=__magic_name__ , **__magic_name__ , )
if text is not None and images is not None:
encoding.update(__magic_name__ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__magic_name__ ) , tensor_type=__magic_name__ )
def lowerCamelCase_ ( self : Tuple , *__magic_name__ : Dict , **__magic_name__ : List[Any] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__magic_name__ , **__magic_name__ )
def lowerCamelCase_ ( self : int , *__magic_name__ : Tuple , **__magic_name__ : Optional[Any] ):
"""simple docstring"""
return self.tokenizer.decode(*__magic_name__ , **__magic_name__ )
@property
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
UpperCamelCase = self.tokenizer.model_input_names
UpperCamelCase = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def lowerCamelCase_ ( self : int ):
"""simple docstring"""
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __magic_name__ , )
return self.image_processor_class
@property
def lowerCamelCase_ ( self : Tuple ):
"""simple docstring"""
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __magic_name__ , )
return self.image_processor
| 181 | 0 |
import pprint
import requests
SCREAMING_SNAKE_CASE = 'https://zenquotes.io/api'
def a ():
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def a ():
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = random_quotes()
pprint.pprint(response)
| 99 |
def _lowercase( __a : list[int] ):
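    # Exchange sort: compare every pair (i, j) with j > i and swap elements that
    # are out of order, sorting the list in place with O(n^2) comparisons.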
a__ =len(__a )
for i in range(__a ):
for j in range(i + 1 , __a ):
if numbers[j] < numbers[i]:
                numbers[i] , numbers[j] =numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
_lowerCAmelCase: Tuple = input('Enter numbers separated by a comma:\n').strip()
_lowerCAmelCase: int = [int(item) for item in user_input.split(',')]
print(exchange_sort(unsorted))
| 20 | 0 |
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class snake_case__ ( lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
@property
def lowercase_ ( self : List[str] ) ->List[str]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def lowercase_ ( self : Tuple ) ->Any:
snake_case__ : Optional[Any] = ort.SessionOptions()
snake_case__ : int = False
return options
def lowercase_ ( self : Union[str, Any] ) ->List[str]:
snake_case__ : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
snake_case__ : Union[str, Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
snake_case__ : List[str] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting', revision='onnx', safety_checker=_snake_case, feature_extractor=_snake_case, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=_snake_case )
snake_case__ : Tuple = 'A red cat sitting on a park bench'
snake_case__ : Union[str, Any] = np.random.RandomState(0 )
snake_case__ : List[Any] = pipe(
prompt=_snake_case, image=_snake_case, mask_image=_snake_case, guidance_scale=7.5, num_inference_steps=1_0, generator=_snake_case, output_type='np', )
snake_case__ : Dict = output.images
snake_case__ : str = images[0, 2_5_5:2_5_8, 2_5_5:2_5_8, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
snake_case__ : Optional[Any] = np.array([0.2_5_1_4, 0.3_0_0_7, 0.3_5_1_7, 0.1_7_9_0, 0.2_3_8_2, 0.3_1_6_7, 0.1_9_4_4, 0.2_2_7_3, 0.2_4_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def lowercase_ ( self : Union[str, Any] ) ->List[str]:
snake_case__ : Optional[Any] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo.png' )
snake_case__ : str = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
snake_case__ : List[Any] = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-inpainting', subfolder='scheduler', revision='onnx' )
snake_case__ : List[str] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
'runwayml/stable-diffusion-inpainting', revision='onnx', scheduler=_snake_case, safety_checker=_snake_case, feature_extractor=_snake_case, provider=self.gpu_provider, sess_options=self.gpu_options, )
pipe.set_progress_bar_config(disable=_snake_case )
snake_case__ : List[str] = 'A red cat sitting on a park bench'
snake_case__ : str = np.random.RandomState(0 )
snake_case__ : Tuple = pipe(
prompt=_snake_case, image=_snake_case, mask_image=_snake_case, guidance_scale=7.5, num_inference_steps=2_0, generator=_snake_case, output_type='np', )
snake_case__ : Union[str, Any] = output.images
snake_case__ : List[str] = images[0, 2_5_5:2_5_8, 2_5_5:2_5_8, -1]
assert images.shape == (1, 5_1_2, 5_1_2, 3)
snake_case__ : Tuple = np.array([0.0_0_8_6, 0.0_0_7_7, 0.0_0_8_3, 0.0_0_9_3, 0.0_1_0_7, 0.0_1_3_9, 0.0_0_9_4, 0.0_0_9_7, 0.0_1_2_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
| 243 |
from __future__ import annotations
def lowercase_ (A : list[int] ):
return len(set(A ) ) == len(A )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 243 | 1 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class A__:
"""simple docstring"""
def __init__( self , _lowercase , _lowercase = 13 , _lowercase = 64 , _lowercase = 2 , _lowercase = 3 , _lowercase = 3 , _lowercase = True , _lowercase = True , _lowercase = 128 , _lowercase=[16, 32, 64, 128] , _lowercase = 7 , _lowercase = 4 , _lowercase = 37 , _lowercase = "gelu" , _lowercase = 0.1 , _lowercase = 0.1 , _lowercase = 10 , _lowercase = 0.0_2 , _lowercase = 2 , _lowercase = 1 , _lowercase = 128 , _lowercase = [2, 2, 2, 2] , _lowercase = 2 , _lowercase = 2 , ) -> Dict:
a_ : Dict = parent
a_ : Optional[int] = batch_size
a_ : str = image_size
a_ : Dict = patch_size
a_ : str = num_channels
a_ : Optional[int] = is_training
a_ : Optional[int] = use_labels
a_ : Tuple = hidden_size
a_ : Optional[int] = num_hidden_layers
a_ : List[Any] = num_attention_heads
a_ : List[Any] = intermediate_size
a_ : List[Any] = hidden_act
a_ : Tuple = hidden_dropout_prob
a_ : Optional[Any] = attention_probs_dropout_prob
a_ : Any = type_sequence_label_size
a_ : Tuple = initializer_range
a_ : Optional[Any] = encoder_stride
a_ : Tuple = num_attention_outputs
a_ : List[str] = embed_dim
a_ : Dict = embed_dim + 1
a_ : Dict = resolution
a_ : Tuple = depths
a_ : Union[str, Any] = hidden_sizes
a_ : Dict = dim
a_ : List[str] = mlp_expansion_ratio
def UpperCamelCase__ ( self ) -> Optional[Any]:
a_ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a_ : Optional[int] = None
if self.use_labels:
a_ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
a_ : List[str] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ) -> Any:
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> str:
a_ : int = TFEfficientFormerModel(config=_lowercase )
a_ : int = model(_lowercase , training=_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase ) -> Optional[int]:
a_ : Dict = self.type_sequence_label_size
a_ : Union[str, Any] = TFEfficientFormerForImageClassification(_lowercase )
a_ : List[str] = model(_lowercase , labels=_lowercase , training=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
a_ : Any = 1
a_ : Optional[Any] = TFEfficientFormerForImageClassification(_lowercase )
a_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a_ : Dict = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase__ ( self ) -> Optional[Any]:
a_ : Any = self.prepare_config_and_inputs()
a_ , a_ , a_ : Any = config_and_inputs
a_ : Union[str, Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class A__(a_, a_, unittest.TestCase ):
"""simple docstring"""
_A : int = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
_A : List[Any] = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
_A : Optional[Any] = False
_A : Tuple = False
_A : List[str] = False
_A : List[str] = False
_A : Union[str, Any] = False
def UpperCamelCase__ ( self ) -> int:
a_ : List[Any] = TFEfficientFormerModelTester(self )
a_ : Dict = ConfigTester(
self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 )
def UpperCamelCase__ ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
def UpperCamelCase__ ( self ) -> int:
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
pass
def UpperCamelCase__ ( self ) -> Any:
a_ , a_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : Optional[Any] = model_class(_lowercase )
a_ : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a_ : Optional[Any] = [*signature.parameters.keys()]
a_ : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowercase )
def UpperCamelCase__ ( self ) -> Dict:
def check_hidden_states_output(_lowercase , _lowercase , _lowercase ):
a_ : List[str] = model_class(_lowercase )
a_ : Optional[Any] = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
a_ : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
a_ : Any = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_lowercase ) , _lowercase )
if hasattr(self.model_tester , """encoder_seq_length""" ):
a_ : Optional[Any] = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , """chunk_length""" ) and self.model_tester.chunk_length > 1:
a_ : Any = seq_length * self.model_tester.chunk_length
else:
a_ : Union[str, Any] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
a_ : List[str] = outputs.decoder_hidden_states
                self.assertIsInstance(_lowercase , (list, tuple) )
self.assertEqual(len(_lowercase ) , _lowercase )
a_ : List[Any] = getattr(self.model_tester , """seq_length""" , _lowercase )
a_ : List[Any] = getattr(self.model_tester , """decoder_seq_length""" , _lowercase )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
a_ , a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a_ : Any = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a_ : Union[str, Any] = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
def UpperCamelCase__ ( self , _lowercase , _lowercase , _lowercase=False ) -> int:
a_ : Optional[Any] = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCamelCase__ ( self ) -> Union[str, Any]:
a_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def UpperCamelCase__ ( self ) -> List[str]:
a_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowercase )
def UpperCamelCase__ ( self ) -> List[Any]:
a_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
@slow
def UpperCamelCase__ ( self ) -> Dict:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a_ : Tuple = TFEfficientFormerModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def UpperCamelCase__ ( self ) -> Optional[int]:
a_ , a_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
a_ : Dict = True
a_ : Dict = getattr(self.model_tester , """seq_length""" , _lowercase )
a_ : Any = getattr(self.model_tester , """encoder_seq_length""" , _lowercase )
a_ : Optional[int] = getattr(self.model_tester , """key_length""" , _lowercase )
a_ : Dict = getattr(self.model_tester , """chunk_length""" , _lowercase )
if chunk_length is not None and hasattr(self.model_tester , """num_hashes""" ):
a_ : Optional[Any] = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
a_ : Optional[int] = True
a_ : str = False
a_ : List[Any] = True
a_ : Union[str, Any] = model_class(_lowercase )
a_ : List[str] = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
a_ : str = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a_ : int = True
a_ : Any = model_class(_lowercase )
a_ : Dict = model(**self._prepare_for_class(_lowercase , _lowercase ) , training=_lowercase )
a_ : Optional[Any] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(_lowercase ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def UpperCamelCase__ ( self ) -> int:
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
a_ , a_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
a_ : Union[str, Any] = model_class(_lowercase )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
a_ : Any = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=_lowercase )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
a_ : str = model(_lowercase )
self.assertTrue(outputs_dict is not None )
def _UpperCAmelCase ( ):
'''simple docstring'''
a_ : List[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
return image
@require_tf
@require_vision
class A__(unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCamelCase__ ( self ) -> Union[str, Any]:
return (
EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self ) -> int:
a_ : int = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
a_ : List[str] = self.default_image_processor
a_ : List[str] = prepare_img()
a_ : List[str] = image_processor(images=_lowercase , return_tensors="""tf""" )
# forward pass
a_ : Dict = model(**_lowercase , training=_lowercase )
# verify the logits
a_ : List[Any] = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , _lowercase )
a_ : List[Any] = tf.constant([-0.0_5_5_5, 0.4_8_2_5, -0.0_8_5_2] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )
@slow
def UpperCamelCase__ ( self ) -> Dict:
a_ : str = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"""snap-research/efficientformer-l1-300""" )
a_ : Optional[Any] = self.default_image_processor
a_ : Optional[int] = prepare_img()
a_ : int = image_processor(images=_lowercase , return_tensors="""tf""" )
# forward pass
a_ : Union[str, Any] = model(**_lowercase , training=_lowercase )
# verify the logits
a_ : Dict = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , _lowercase )
a_ : int = tf.constant([-0.1_3_1_2, 0.4_3_5_3, -1.0_4_9_9] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowercase , atol=1e-4 ) )
| 540 |
from __future__ import annotations
def _UpperCAmelCase ( a__):
'''simple docstring'''
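    # Bucket sort: allocate one bucket per integer value in [min, max], place each
    # element into its bucket, then concatenate the individually sorted buckets.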
if len(a__) == 0:
return []
    min_value , max_value = min(a__), max(a__)
a_ : Tuple = int(max_value - min_value) + 1
a_ : list[list] = [[] for _ in range(a__)]
for i in my_list:
buckets[int(i - min_value)].append(a__)
return [v for bucket in buckets for v in sorted(a__)]
if __name__ == "__main__":
from doctest import testmod
testmod()
assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 540 | 1 |
"""simple docstring"""
lowerCAmelCase__ = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowerCAmelCase__ = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowerCAmelCase__ = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 716 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCamelCase ( unittest.TestCase ):
@slow
def snake_case_ (self ) -> List[str]:
UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" , return_dict=__a ).to(__a )
UpperCamelCase = AutoTokenizer.from_pretrained("google/mt5-small" )
UpperCamelCase = tokenizer("Hello there" , return_tensors="pt" ).input_ids
UpperCamelCase = tokenizer("Hi I am" , return_tensors="pt" ).input_ids
UpperCamelCase = model(input_ids.to(__a ) , labels=labels.to(__a ) ).loss
UpperCamelCase = -(labels.shape[-1] * loss.item())
UpperCamelCase = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 544 | 0 |
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class UpperCamelCase_ ( unittest.TestCase ):
def _snake_case ( self :Tuple ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__A ) )
def _snake_case ( self :List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__A ) )
def _snake_case ( self :Tuple ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__A ) )
def _snake_case ( self :Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__A ) )
def _snake_case ( self :Optional[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
# Removed: 'text_encoder/model.safetensors',
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertFalse(is_safetensors_compatible(__A ) )
def _snake_case ( self :Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :Any ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :List[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertFalse(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :str ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :Optional[int] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
# 'text_encoder/model.fp16.safetensors',
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertFalse(is_safetensors_compatible(__A , variant=__A ) ) | 6 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class UpperCamelCase_ ( UpperCamelCase__ ):
lowerCamelCase_ = 42
class UpperCamelCase_ ( UpperCamelCase__ , UpperCamelCase__ ):
@register_to_config
def __init__( self :Union[str, Any] , __A :int = 3 , __A :int = 3 , __A :Tuple[str] = ("DownEncoderBlock2D",) , __A :Tuple[str] = ("UpDecoderBlock2D",) , __A :Tuple[int] = (64,) , __A :int = 1 , __A :str = "silu" , __A :int = 3 , __A :int = 32 , __A :int = 256 , __A :int = 32 , __A :Optional[int] = None , __A :float = 0.1_8_2_1_5 , __A :str = "group" , ) -> Any:
"""simple docstring"""
super().__init__()
# pass init params to Encoder
SCREAMING_SNAKE_CASE__ = Encoder(
in_channels=__A , out_channels=__A , down_block_types=__A , block_out_channels=__A , layers_per_block=__A , act_fn=__A , norm_num_groups=__A , double_z=__A , )
SCREAMING_SNAKE_CASE__ = vq_embed_dim if vq_embed_dim is not None else latent_channels
        SCREAMING_SNAKE_CASE__ = nn.Conv2d(__A , __A , 1 )
SCREAMING_SNAKE_CASE__ = VectorQuantizer(__A , __A , beta=0.2_5 , remap=__A , sane_index_shape=__A )
        SCREAMING_SNAKE_CASE__ = nn.Conv2d(__A , __A , 1 )
# pass init params to Decoder
SCREAMING_SNAKE_CASE__ = Decoder(
in_channels=__A , out_channels=__A , up_block_types=__A , block_out_channels=__A , layers_per_block=__A , act_fn=__A , norm_num_groups=__A , norm_type=__A , )
@apply_forward_hook
def _snake_case ( self :Union[str, Any] , __A :torch.FloatTensor , __A :bool = True ) -> VQEncoderOutput:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.encoder(__A )
SCREAMING_SNAKE_CASE__ = self.quant_conv(__A )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=__A )
@apply_forward_hook
def _snake_case ( self :Tuple , __A :torch.FloatTensor , __A :bool = False , __A :bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
if not force_not_quantize:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.quantize(__A )
else:
SCREAMING_SNAKE_CASE__ = h
SCREAMING_SNAKE_CASE__ = self.post_quant_conv(__A )
SCREAMING_SNAKE_CASE__ = self.decoder(__A , quant if self.config.norm_type == """spatial""" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__A )
def _snake_case ( self :int , __A :torch.FloatTensor , __A :bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = sample
SCREAMING_SNAKE_CASE__ = self.encode(__A ).latents
SCREAMING_SNAKE_CASE__ = self.decode(__A ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__A ) | 6 | 1 |
'''simple docstring'''
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
if not is_tf_available() and not is_torch_available():
raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
a_ = False
a_ = False
def _a( UpperCamelCase__ : Namespace ):
'''simple docstring'''
return TrainCommand(UpperCamelCase__ )
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
@staticmethod
def __magic_name__ ( __lowercase : ArgumentParser ) -> Any:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =parser.add_parser('''train''' , help='''CLI tool to train a model on a task.''' )
train_parser.add_argument(
'''--train_data''' , type=__lowercase , required=__lowercase , help='''path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.''' , )
train_parser.add_argument(
'''--column_label''' , type=__lowercase , default=0 , help='''Column of the dataset csv file with example labels.''' )
train_parser.add_argument(
'''--column_text''' , type=__lowercase , default=1 , help='''Column of the dataset csv file with example texts.''' )
train_parser.add_argument(
'''--column_id''' , type=__lowercase , default=2 , help='''Column of the dataset csv file with example ids.''' )
train_parser.add_argument(
'''--skip_first_row''' , action='''store_true''' , help='''Skip the first row of the csv file (headers).''' )
train_parser.add_argument('''--validation_data''' , type=__lowercase , default='''''' , help='''path to validation dataset.''' )
train_parser.add_argument(
'''--validation_split''' , type=__lowercase , default=0.1 , help='''if validation dataset is not provided, fraction of train dataset to use as validation dataset.''' , )
train_parser.add_argument('''--output''' , type=__lowercase , default='''./''' , help='''path to saved the trained model.''' )
train_parser.add_argument(
'''--task''' , type=__lowercase , default='''text_classification''' , help='''Task to train the model on.''' )
train_parser.add_argument(
'''--model''' , type=__lowercase , default='''bert-base-uncased''' , help='''Model\'s name or path to stored model.''' )
train_parser.add_argument('''--train_batch_size''' , type=__lowercase , default=32 , help='''Batch size for training.''' )
train_parser.add_argument('''--valid_batch_size''' , type=__lowercase , default=64 , help='''Batch size for validation.''' )
train_parser.add_argument('''--learning_rate''' , type=__lowercase , default=3e-5 , help='''Learning rate.''' )
train_parser.add_argument('''--adam_epsilon''' , type=__lowercase , default=1e-08 , help='''Epsilon for Adam optimizer.''' )
train_parser.set_defaults(func=__lowercase )
def __init__( self : Tuple , __lowercase : Namespace ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Tuple =logging.get_logger('''transformers-cli/training''' )
SCREAMING_SNAKE_CASE__ : int ='''tf''' if is_tf_available() else '''torch'''
os.makedirs(args.output , exist_ok=__lowercase )
SCREAMING_SNAKE_CASE__ : Any =args.output
SCREAMING_SNAKE_CASE__ : str =args.column_label
SCREAMING_SNAKE_CASE__ : List[Any] =args.column_text
SCREAMING_SNAKE_CASE__ : Tuple =args.column_id
self.logger.info(F"Loading {args.task} pipeline for {args.model}" )
if args.task == "text_classification":
SCREAMING_SNAKE_CASE__ : List[str] =TextClassificationPipeline.from_pretrained(args.model )
elif args.task == "token_classification":
raise NotImplementedError
elif args.task == "question_answering":
raise NotImplementedError
self.logger.info(F"Loading dataset from {args.train_data}" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =Processor.create_from_csv(
args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =None
if args.validation_data:
self.logger.info(F"Loading validation dataset from {args.validation_data}" )
SCREAMING_SNAKE_CASE__ : List[Any] =Processor.create_from_csv(
args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
SCREAMING_SNAKE_CASE__ : Optional[Any] =args.validation_split
SCREAMING_SNAKE_CASE__ : List[Any] =args.train_batch_size
SCREAMING_SNAKE_CASE__ : Any =args.valid_batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =args.learning_rate
SCREAMING_SNAKE_CASE__ : int =args.adam_epsilon
def __magic_name__ ( self : Any ) -> str:
if self.framework == "tf":
return self.run_tf()
return self.run_torch()
def __magic_name__ ( self : Optional[int] ) -> Tuple:
raise NotImplementedError
def __magic_name__ ( self : Dict ) -> List[Any]:
self.pipeline.fit(
self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
# Save trained pipeline
self.pipeline.save_pretrained(self.output ) | 665 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
a_ = TypeVar('T')
class __SCREAMING_SNAKE_CASE ( Generic[T] ):
snake_case_ = 42 # Cache store of keys
snake_case_ = 42 # References of the keys in cache
snake_case_ = 10 # Maximum capacity of cache
def __init__( self : Dict , __lowercase : int ) -> None:
SCREAMING_SNAKE_CASE__ : Any =deque()
SCREAMING_SNAKE_CASE__ : str =set()
if not n:
SCREAMING_SNAKE_CASE__ : Optional[Any] =sys.maxsize
elif n < 0:
raise ValueError('''n should be an integer greater than 0.''' )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] =n
def __magic_name__ ( self : List[str] , __lowercase : T ) -> None:
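        # Miss: when the cache is full, evict the least recently used key from the
        # right end of the deque. Hit: remove the key from its current position.
        # In both cases the key is re-inserted at the front as most recently used.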
if x not in self.key_reference:
if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
SCREAMING_SNAKE_CASE__ : int =self.dq_store.pop()
self.key_reference.remove(__lowercase )
else:
self.dq_store.remove(__lowercase )
self.dq_store.appendleft(__lowercase )
self.key_reference.add(__lowercase )
def __magic_name__ ( self : Union[str, Any] ) -> None:
for k in self.dq_store:
print(__lowercase )
def __repr__( self : List[Any] ) -> str:
return F"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = LRUCache(4)
lru_cache.refer('A')
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer('A')
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]" | 665 | 1 |
'''simple docstring'''
import math
import sys
def A_ ( SCREAMING_SNAKE_CASE_ ) ->int:
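    # Dynamic programming: answers[i] stores the minimum number of perfect squares
    # that sum to i, built bottom-up by trying every square j*j not larger than i.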
if number != int(SCREAMING_SNAKE_CASE_ ):
raise ValueError("""the value of input must be a natural number""" )
if number < 0:
raise ValueError("""the value of input must not be a negative number""" )
if number == 0:
return 1
lowercase_ = [-1] * (number + 1)
lowercase_ = 0
for i in range(1 , number + 1 ):
lowercase_ = sys.maxsize
lowercase_ = int(math.sqrt(SCREAMING_SNAKE_CASE_ ) )
for j in range(1 , root + 1 ):
lowercase_ = 1 + answers[i - (j**2)]
lowercase_ = min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowercase_ = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 451 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__snake_case = {
"""configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""],
"""processing_git""": ["""GitProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
"""GIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GitForCausalLM""",
"""GitModel""",
"""GitPreTrainedModel""",
"""GitVisionModel""",
]
if TYPE_CHECKING:
from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
from .processing_git import GitProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_git import (
GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GitForCausalLM,
GitModel,
GitPreTrainedModel,
GitVisionModel,
)
else:
import sys
__snake_case = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 451 | 1 |
from __future__ import annotations
class lowerCAmelCase__ :
def __init__( self : Dict , __UpperCamelCase : Optional[int]=None ) -> Any:
A = data
A = None
def __repr__( self : int ) -> Optional[int]:
A = []
A = self
while temp:
string_rep.append(f'''{temp.data}''' )
A = temp.next
return "->".join(__UpperCamelCase )
def lowerCamelCase_ ( lowerCAmelCase__ : list ) -> str:
'''simple docstring'''
if not elements_list:
raise Exception('The Elements List is empty' )
A = A = Node(elements_list[0] )
for i in range(1 , len(lowerCAmelCase__ ) ):
A = Node(elements_list[i] )
A = current.next
return head
def lowerCamelCase_ ( lowerCAmelCase__ : Node ) -> None:
'''simple docstring'''
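    # Recurse to the tail first so the node values are printed in reverse order.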
if head_node is not None and isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
print_reverse(head_node.next )
print(head_node.data )
def lowerCamelCase_ ( ) -> Optional[int]:
'''simple docstring'''
from doctest import testmod
testmod()
A = make_linked_list([14, 52, 14, 12, 43] )
print('Linked List:' )
print(lowerCAmelCase__ )
print('Elements in Reverse:' )
print_reverse(lowerCAmelCase__ )
if __name__ == "__main__":
main()
| 709 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowerCAmelCase__ :
def __init__( self : Any , __UpperCamelCase : str , __UpperCamelCase : Any=13 , __UpperCamelCase : Optional[Any]=7 , __UpperCamelCase : List[str]=True , __UpperCamelCase : Any=True , __UpperCamelCase : List[str]=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : int=99 , __UpperCamelCase : Any=32 , __UpperCamelCase : int=2 , __UpperCamelCase : Tuple=4 , __UpperCamelCase : Any=37 , __UpperCamelCase : List[Any]="gelu" , __UpperCamelCase : Union[str, Any]=0.1 , __UpperCamelCase : List[str]=0.1 , __UpperCamelCase : Any=512 , __UpperCamelCase : Tuple=16 , __UpperCamelCase : int=2 , __UpperCamelCase : Union[str, Any]=0.0_2 , __UpperCamelCase : Optional[Any]=3 , __UpperCamelCase : Any=4 , __UpperCamelCase : List[str]=None , ) -> Union[str, Any]:
A = parent
A = 13
A = 7
A = True
A = True
A = True
A = True
A = 99
A = 384
A = 2
A = 4
A = 37
A = 'gelu'
A = 0.1
A = 0.1
A = 512
A = 16
A = 2
A = 0.0_2
A = 3
A = 4
A = 128
A = 2
A = 9
A = 1
A = None
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
A = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A = None
if self.use_input_mask:
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
if self.use_token_type_ids:
A = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A = ids_tensor([self.batch_size] , self.num_choices )
A = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__UpperCamelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ) -> Union[str, Any]:
A = TFConvBertModel(config=__UpperCamelCase )
A = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
A = [input_ids, input_mask]
A = model(__UpperCamelCase )
A = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] ) -> str:
A = TFConvBertForMaskedLM(config=__UpperCamelCase )
A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
A = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __UpperCamelCase ( self : Dict , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : Tuple , __UpperCamelCase : str , __UpperCamelCase : List[str] ) -> Tuple:
A = self.num_labels
A = TFConvBertForSequenceClassification(config=__UpperCamelCase )
A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
A = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __UpperCamelCase ( self : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] ) -> Tuple:
A = self.num_choices
A = TFConvBertForMultipleChoice(config=__UpperCamelCase )
A = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
A = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
A = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
A = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
A = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) -> Any:
A = self.num_labels
A = TFConvBertForTokenClassification(config=__UpperCamelCase )
A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
A = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __UpperCamelCase ( self : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] ) -> str:
A = TFConvBertForQuestionAnswering(config=__UpperCamelCase )
A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
A = model(__UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __UpperCamelCase ( self : Any ) -> List[str]:
A = self.prepare_config_and_inputs()
        ( A , A , A , A , A , A , A ) = config_and_inputs
A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
A_ : Tuple = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A_ : int = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A_ : Optional[int] = False
A_ : Any = False
A_ : str = False
def __UpperCamelCase ( self : int ) -> Any:
A = TFConvBertModelTester(self )
A = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 )
def __UpperCamelCase ( self : Optional[int] ) -> Any:
self.config_tester.run_common_tests()
def __UpperCamelCase ( self : Dict ) -> str:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase )
def __UpperCamelCase ( self : Tuple ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__UpperCamelCase )
def __UpperCamelCase ( self : Union[str, Any] ) -> int:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__UpperCamelCase )
def __UpperCamelCase ( self : str ) -> List[str]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__UpperCamelCase )
def __UpperCamelCase ( self : Optional[Any] ) -> int:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase )
@slow
def __UpperCamelCase ( self : Optional[Any] ) -> List[str]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = True
A = True
if hasattr(__UpperCamelCase , 'use_cache' ):
A = True
A = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
A = getattr(self.model_tester , 'key_length' , __UpperCamelCase )
for model_class in self.all_model_classes:
A = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
A = model_class(__UpperCamelCase )
A = len(model(__UpperCamelCase ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase , saved_model=__UpperCamelCase )
A = os.path.join(__UpperCamelCase , 'saved_model' , '1' )
A = tf.keras.models.load_model(__UpperCamelCase )
A = model(__UpperCamelCase )
if self.is_encoder_decoder:
A = outputs['encoder_hidden_states']
A = outputs['encoder_attentions']
else:
A = outputs['hidden_states']
A = outputs['attentions']
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
A = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __UpperCamelCase ( self : str ) -> str:
A = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(__UpperCamelCase )
def __UpperCamelCase ( self : List[Any] ) -> Dict:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = True
A = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
A = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
A = getattr(self.model_tester , 'key_length' , __UpperCamelCase )
A = getattr(self.model_tester , 'key_length' , __UpperCamelCase )
def check_decoder_attentions_output(__UpperCamelCase : List[Any] ):
A = len(__UpperCamelCase )
self.assertEqual(out_len % 2 , 0 )
A = outputs.decoder_attentions
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(__UpperCamelCase : Dict ):
A = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
A = True
A = False
A = model_class(__UpperCamelCase )
A = model(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
A = len(__UpperCamelCase )
self.assertEqual(config.output_hidden_states , __UpperCamelCase )
check_encoder_attentions_output(__UpperCamelCase )
if self.is_encoder_decoder:
A = model_class(__UpperCamelCase )
A = model(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(config.output_hidden_states , __UpperCamelCase )
check_decoder_attentions_output(__UpperCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
A = True
A = model_class(__UpperCamelCase )
A = model(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(config.output_hidden_states , __UpperCamelCase )
check_encoder_attentions_output(__UpperCamelCase )
# Check attention is always last and order is fine
A = True
A = True
A = model_class(__UpperCamelCase )
A = model(self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(__UpperCamelCase ) )
self.assertEqual(model.config.output_hidden_states , __UpperCamelCase )
check_encoder_attentions_output(__UpperCamelCase )
@require_tf
class lowerCAmelCase__ ( unittest.TestCase ):
@slow
def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
A = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
A = tf.constant([[0, 1, 2, 3, 4, 5]] )
A = model(__UpperCamelCase )[0]
A = [1, 6, 768]
self.assertEqual(output.shape , __UpperCamelCase )
A = tf.constant(
[
[
[-0.0_3_4_7_5_4_9_3, -0.4_6_8_6_0_3_4, -0.3_0_6_3_8_8_3_2],
[0.2_2_6_3_7_2_4_8, -0.2_6_9_8_8_6_4_6, -0.7_4_2_3_4_2_4],
[0.1_0_3_2_4_8_6_8, -0.4_5_0_1_3_5_0_8, -0.5_8_2_8_0_7_8_4],
]
] )
        tf.debugging.assert_near(output[:, :3, :3] , __UpperCamelCase , atol=1e-4 )
import argparse
from collections import defaultdict
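# Rewrite a single expected-value line inside test `test_name` of `class_name` in `file`:
# the done_test[_id]-th matching line (at 8- or 16-space indentation) is replaced with `correct_line`.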
def lowerCAmelCase_ ( __a , __a , __a , __a , __a ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__: str =F"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(__a , "r" ) as f:
lowerCamelCase__: Optional[int] =f.readlines()
lowerCamelCase__: List[str] =F"""class {class_name}("""
lowerCamelCase__: Any =F"""{4 * " "}def {test_name}("""
lowerCamelCase__: Dict =F"""{8 * " "}{correct_line.split()[0]}"""
lowerCamelCase__: Any =F"""{16 * " "}{correct_line.split()[0]}"""
lowerCamelCase__: Tuple =False
lowerCamelCase__: List[str] =False
lowerCamelCase__: List[Any] =False
lowerCamelCase__: int =False
lowerCamelCase__: Optional[int] =0
lowerCamelCase__: Optional[Any] =0
lowerCamelCase__: List[str] =[]
for line in lines:
if line.startswith(__a ):
lowerCamelCase__: List[str] =True
elif in_class and line.startswith(__a ):
lowerCamelCase__: Dict =True
elif in_class and in_func and (line.startswith(__a ) or line.startswith(__a )):
lowerCamelCase__: int =len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
lowerCamelCase__: Dict =True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
lowerCamelCase__: Union[str, Any] =True
if in_class and in_func and in_line and insert_line:
new_lines.append(F"""{spaces * " "}{correct_line}""" )
lowerCamelCase__: Any =False
else:
new_lines.append(__a )
with open(__a , "w" ) as f:
for line in new_lines:
f.write(__a )
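# Entry point: read the file of corrected lines (one `file;class;test;correct_line` row each),
# optionally filter by a file listing failing tests, and patch each affected test in place
# via the helper above.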
def lowerCAmelCase_ ( __a , __a=None ) -> int:
"""simple docstring"""
if fail is not None:
with open(__a , "r" ) as f:
lowerCamelCase__: str ={l.strip() for l in f.readlines()}
else:
lowerCamelCase__: List[str] =None
with open(__a , "r" ) as f:
lowerCamelCase__: Any =f.readlines()
lowerCamelCase__: str =defaultdict(__a )
for line in correct_lines:
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__: Optional[Any] =line.split(";" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(__a , __a , __a , __a , __a )
if __name__ == "__main__":
__A = argparse.ArgumentParser()
parser.add_argument("--correct_filename", help="filename of tests with expected result")
parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
__A = parser.parse_args()
main(args.correct_filename, args.fail_filename)
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__A = logging.getLogger(__name__)
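# Quantize `model` with bitsandbytes (8-bit or 4-bit linear layers) according to `bnb_quantization_config`:
# either in place for an already-materialized model, or by loading checkpoint weights into an
# empty-weight model and dispatching it across the inferred or user-provided device map.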
def lowerCAmelCase_ ( __a , __a , __a = None , __a = None , __a = None , __a = None , __a = None , __a = False , ) -> str:
"""simple docstring"""
lowerCamelCase__: int =bnb_quantization_config.load_in_abit
lowerCamelCase__: Any =bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
"You have a version of `bitsandbytes` that is not compatible with 8bit quantization,"
" make sure you have the latest version of `bitsandbytes` installed." )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
"You have a version of `bitsandbytes` that is not compatible with 4bit quantization,"
"make sure you have the latest version of `bitsandbytes` installed." )
lowerCamelCase__: List[Any] =[]
# custom device map
if isinstance(__a , __a ) and len(device_map.keys() ) > 1:
lowerCamelCase__: Optional[int] =[key for key, value in device_map.items() if value in ["disk", "cpu"]]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowerCamelCase__: Any =get_keys_to_not_convert(__a )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(__a )
lowerCamelCase__: List[str] =bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowerCamelCase__: List[Any] =[]
lowerCamelCase__: int =bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(__a )
# compatibility with peft
lowerCamelCase__: List[str] =load_in_abit
lowerCamelCase__: int =load_in_abit
lowerCamelCase__: Tuple =get_parameter_device(__a )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
"It is not recommended to quantize a loaded model. "
"The model should be instantiated under the `init_empty_weights` context manager." )
lowerCamelCase__: Tuple =replace_with_bnb_layers(__a , __a , modules_to_not_convert=__a )
# convert param to the right dtype
lowerCamelCase__: Dict =bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
lowerCamelCase__: str =name.replace(".weight" , "" ).replace(".bias" , "" )
lowerCamelCase__: Optional[Any] =getattr(__a , __a , __a )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(__a ):
param.to(__a )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization." )
logger.info(
F"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
"We move the model to cuda." )
return model
elif weights_location is None:
raise RuntimeError(
F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
else:
with init_empty_weights():
lowerCamelCase__: str =replace_with_bnb_layers(
__a , __a , modules_to_not_convert=__a )
lowerCamelCase__: Optional[Any] =get_quantized_model_device_map(
__a , __a , __a , max_memory=__a , no_split_module_classes=__a , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowerCamelCase__: Any =True
lowerCamelCase__: List[str] =any(x in list(device_map.values() ) for x in ["cpu", "disk"] )
load_checkpoint_in_model(
__a , __a , __a , dtype=bnb_quantization_config.torch_dtype , offload_folder=__a , offload_state_dict=__a , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(__a , device_map=__a , offload_dir=__a )
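# Infer or validate the device map used to dispatch the quantized model across GPUs, CPU and disk,
# balancing memory when requested and rejecting CPU/disk placement of quantized modules when unsupported.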
def lowerCAmelCase_ ( __a , __a , __a=None , __a=None , __a=None ) -> str:
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
lowerCamelCase__: str ={"": torch.cuda.current_device()}
else:
raise RuntimeError("No GPU found. A GPU is needed for quantization." )
logger.info("The device_map was not initialized." "Setting device_map to `{'':torch.cuda.current_device()}`." )
if isinstance(__a , __a ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
"If passing a string for `device_map`, please choose 'auto', 'balanced', 'balanced_low_0' or "
"'sequential'." )
lowerCamelCase__: Optional[int] ={}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
lowerCamelCase__: Optional[Any] ={}
lowerCamelCase__: str =special_dtypes
lowerCamelCase__: List[str] =no_split_module_classes
lowerCamelCase__: Dict =bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowerCamelCase__: Optional[Any] =get_balanced_memory(
__a , low_zero=(device_map == "balanced_low_0") , max_memory=__a , **__a , )
lowerCamelCase__: Union[str, Any] =max_memory
lowerCamelCase__: Dict =infer_auto_device_map(__a , **__a )
if isinstance(__a , __a ):
# check if don't have any quantized module on the cpu
lowerCamelCase__: Union[str, Any] =bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowerCamelCase__: List[Any] ={
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
"\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n " )
else:
logger.info(
"Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit" )
del device_map_without_some_modules
return device_map
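# Recursively swap every eligible nn.Linear in `model` for a bitsandbytes 8-bit/4-bit linear layer,
# skipping anything listed in `modules_to_not_convert`; warns if nothing was replaced.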
def lowerCAmelCase_ ( __a , __a , __a=None , __a=None ) -> Optional[Any]:
"""simple docstring"""
if modules_to_not_convert is None:
lowerCamelCase__: List[Any] =[]
lowerCamelCase__ , lowerCamelCase__: Any =_replace_with_bnb_layers(
__a , __a , __a , __a )
if not has_been_replaced:
logger.warning(
"You are loading your model in 8bit or 4bit but no linear modules were found in your model."
" this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers."
" Please double check your model architecture, or submit an issue on github if you think this is"
" a bug." )
return model
def lowerCAmelCase_ ( __a , __a , __a=None , __a=None , ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: Optional[int] =False
for name, module in model.named_children():
if current_key_name is None:
lowerCamelCase__: Optional[Any] =[]
current_key_name.append(__a )
if isinstance(__a , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowerCamelCase__: List[str] =".".join(__a )
lowerCamelCase__: Optional[Any] =True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowerCamelCase__: int =False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
lowerCamelCase__: Optional[int] =bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=__a , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
lowerCamelCase__: Dict =bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError("load_in_8bit and load_in_4bit can't be both False" )
lowerCamelCase__: Dict =module.weight.data
if module.bias is not None:
lowerCamelCase__: List[Any] =module.bias.data
bnb_module.requires_grad_(__a )
setattr(__a , __a , __a )
lowerCamelCase__: int =True
if len(list(module.children() ) ) > 0:
lowerCamelCase__ , lowerCamelCase__: List[str] =_replace_with_bnb_layers(
__a , __a , __a , __a )
lowerCamelCase__: Union[str, Any] =has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
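# Heuristically collect module names (tied weights and the final head) that should stay in full
# precision and therefore be excluded from quantization.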
def lowerCAmelCase_ ( __a ) -> List[Any]:
"""simple docstring"""
with init_empty_weights():
lowerCamelCase__: Any =deepcopy(__a ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
lowerCamelCase__: str =find_tied_parameters(__a )
# For compatibility with Accelerate < 0.18
if isinstance(__a , __a ):
lowerCamelCase__: int =sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowerCamelCase__: str =sum(__a , [] )
lowerCamelCase__: str =len(__a ) > 0
# Check if it is a base model
lowerCamelCase__: Optional[Any] =False
if hasattr(__a , "base_model_prefix" ):
lowerCamelCase__: Union[str, Any] =not hasattr(__a , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCamelCase__: Optional[int] =list(model.named_children() )
lowerCamelCase__: Optional[int] =[list_modules[-1][0]]
# add last module together with tied weights
lowerCamelCase__: Union[str, Any] =set(__a ) - set(__a )
lowerCamelCase__: List[str] =list(set(__a ) ) + list(__a )
# remove ".weight" from the keys
lowerCamelCase__: List[Any] =[".weight", ".bias"]
lowerCamelCase__: Tuple =[]
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCamelCase__: Optional[Any] =name.replace(__a , "" )
filtered_module_names.append(__a )
return filtered_module_names
def lowerCAmelCase_ ( __a ) -> Tuple:
"""simple docstring"""
for m in model.modules():
if isinstance(__a , bnb.nn.Linearabit ):
return True
return False
def lowerCAmelCase_ ( __a ) -> List[str]:
"""simple docstring"""
return next(parameter.parameters() ).device
def lowerCAmelCase_ ( __a , __a , __a , __a , __a , __a , __a ) -> Any:
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(__a , __a , 0 , dtype=__a , value=__a )
lowerCamelCase__: Dict =param_name
lowerCamelCase__: Tuple =model
if "." in tensor_name:
lowerCamelCase__: Any =tensor_name.split("." )
for split in splits[:-1]:
lowerCamelCase__: Any =getattr(__a , __a )
if new_module is None:
raise ValueError(F"""{module} has no attribute {split}.""" )
lowerCamelCase__: str =new_module
lowerCamelCase__: int =splits[-1]
# offload weights
lowerCamelCase__: str =False
offload_weight(module._parameters[tensor_name] , __a , __a , index=__a )
if hasattr(module._parameters[tensor_name] , "SCB" ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace("weight" , "SCB" ) , __a , index=__a , )
else:
offload_weight(__a , __a , __a , index=__a )
offload_weight(__a , param_name.replace("weight" , "SCB" ) , __a , index=__a )
set_module_tensor_to_device(__a , __a , "meta" , dtype=__a , value=torch.empty(*param.size() ) )
import math
def lowerCamelCase__ ( _a , _a):
if (
not isinstance(_a , (int, float))
or power_factor < -1
or power_factor > 1
):
raise ValueError("power_factor must be a valid float value between -1 and 1.")
return apparent_power * power_factor
def lowerCamelCase__ ( _a , _a):
if (
not isinstance(_a , (int, float))
or power_factor < -1
or power_factor > 1
):
raise ValueError("power_factor must be a valid float value between -1 and 1.")
return apparent_power * math.sqrt(1 - power_factor**2)
if __name__ == "__main__":
import doctest
    doctest.testmod()
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(__A ) , 'Tatoeba directory does not exist.' )
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __UpperCamelCase ( self : Any ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = tempfile.mkdtemp()
return TatoebaConverter(save_dir=a )
@slow
def __UpperCamelCase ( self : List[Any] ) -> int:
"""simple docstring"""
self.resolver.convert_models(["heb-eng"] )
@slow
def __UpperCamelCase ( self : Optional[int] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE ,SCREAMING_SNAKE_CASE : Optional[Any] = self.resolver.write_model_card("opus-mt-he-en" , dry_run=a )
        assert mmeta["long_pair"] == "heb-eng"
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCAmelCase : Dict = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase__):
lowerCAmelCase_ = ["""pixel_values"""]
def __init__( self , A_ = True , A_ = None , A_ = PILImageResampling.BICUBIC , A_ = True , A_ = True , A_ = 1 / 255 , A_ = None , A_ = True , A_ = None , A_ = None , **A_ , )-> None:
'''simple docstring'''
super().__init__(**lowercase_ )
UpperCamelCase = size if size is not None else {'height': 224, 'width': 224}
UpperCamelCase = get_size_dict(lowercase_ )
UpperCamelCase = crop_size if crop_size is not None else {'height': 224, 'width': 224}
UpperCamelCase = get_size_dict(lowercase_ , default_to_square=lowercase_ , param_name='crop_size' )
UpperCamelCase = do_resize
UpperCamelCase = do_rescale
UpperCamelCase = do_normalize
UpperCamelCase = do_center_crop
UpperCamelCase = crop_size
UpperCamelCase = size
UpperCamelCase = resample
UpperCamelCase = rescale_factor
UpperCamelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCamelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def UpperCAmelCase_ ( self , A_ , A_ , A_ = PILImageResampling.BILINEAR , A_ = None , **A_ , )-> np.ndarray:
'''simple docstring'''
UpperCamelCase = get_size_dict(lowercase_ )
if "shortest_edge" in size:
UpperCamelCase = get_resize_output_image_size(lowercase_ , size=size['shortest_edge'] , default_to_square=lowercase_ )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
UpperCamelCase = (size['height'], size['width'])
else:
raise ValueError(F'''Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}''' )
return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCAmelCase_ ( self , A_ , A_ , A_ = None , **A_ , )-> np.ndarray:
'''simple docstring'''
UpperCamelCase = get_size_dict(lowercase_ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(lowercase_ , size=(size['height'], size['width']) , data_format=lowercase_ , **lowercase_ )
def UpperCAmelCase_ ( self , A_ , A_ , A_ = None , **A_ )-> np.ndarray:
'''simple docstring'''
return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCAmelCase_ ( self , A_ , A_ , A_ , A_ = None , **A_ , )-> np.ndarray:
'''simple docstring'''
return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )
def UpperCAmelCase_ ( self , A_ , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = None , A_ = ChannelDimension.FIRST , **A_ , )-> BatchFeature:
'''simple docstring'''
UpperCamelCase = do_resize if do_resize is not None else self.do_resize
UpperCamelCase = do_rescale if do_rescale is not None else self.do_rescale
UpperCamelCase = do_normalize if do_normalize is not None else self.do_normalize
UpperCamelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCamelCase = crop_size if crop_size is not None else self.crop_size
UpperCamelCase = get_size_dict(lowercase_ , param_name='crop_size' , default_to_square=lowercase_ )
UpperCamelCase = resample if resample is not None else self.resample
UpperCamelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCamelCase = image_mean if image_mean is not None else self.image_mean
UpperCamelCase = image_std if image_std is not None else self.image_std
UpperCamelCase = size if size is not None else self.size
UpperCamelCase = get_size_dict(lowercase_ )
if not is_batched(lowercase_ ):
UpperCamelCase = [images]
if not valid_images(lowercase_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
UpperCamelCase = [to_numpy_array(lowercase_ ) for image in images]
if do_resize:
UpperCamelCase = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
if do_center_crop:
UpperCamelCase = [self.center_crop(image=lowercase_ , size=lowercase_ ) for image in images]
if do_rescale:
UpperCamelCase = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
if do_normalize:
UpperCamelCase = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
UpperCamelCase = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
UpperCamelCase = {'pixel_values': images}
return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
UpperCAmelCase__ : Dict = logging.getLogger(__name__)
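# Build small synthetic regression DataLoaders (x, a*x + b + noise) shared by the checkpointing tests below.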
def A ( snake_case__ : Optional[int]=2 , snake_case__ : List[str]=3 , snake_case__ : Tuple=16 , snake_case__ : int = 10 , snake_case__ : int = 2 ) -> Optional[Any]:
'''simple docstring'''
def get_dataset(snake_case__ : int ):
__snake_case = torch.randn(batch_size * n_batches , 1 )
return TensorDataset(snake_case__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) )
__snake_case = get_dataset(snake_case__ )
__snake_case = get_dataset(snake_case__ )
__snake_case = DataLoader(snake_case__ , shuffle=snake_case__ , batch_size=snake_case__ , num_workers=4 )
__snake_case = DataLoader(snake_case__ , shuffle=snake_case__ , batch_size=snake_case__ , num_workers=4 )
return (train_dataloader, valid_dataloader)
def A ( snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : List[Any]=None ) -> Optional[Any]:
'''simple docstring'''
__snake_case = []
for epoch in range(snake_case__ ):
# Train quickly
model.train()
for batch in dataloader:
__snake_case , __snake_case = batch
__snake_case = model(snake_case__ )
__snake_case = torch.nn.functional.mse_loss(snake_case__ , snake_case__ )
accelerator.backward(snake_case__ )
optimizer.step()
optimizer.zero_grad()
rands.append(random.random() ) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class __lowercase ( nn.Module ):
def __init__( self) -> Any:
super().__init__()
__snake_case = nn.Parameter(torch.randn(1))
__snake_case = nn.Parameter(torch.randn(1))
def _a ( self , lowercase_) -> Any:
return x * self.a + self.b
class __lowercase ( unittest.TestCase ):
def _a ( self) -> Tuple:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2)
__snake_case = DummyModel()
__snake_case = torch.optim.Adam(params=model.parameters() , lr=1e-3)
__snake_case , __snake_case = dummy_dataloaders()
__snake_case = ProjectConfiguration(total_limit=1 , project_dir=lowercase_ , automatic_checkpoint_naming=lowercase_)
# Train baseline
__snake_case = Accelerator(project_config=lowercase_)
__snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_)
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir)) , 1)
def _a ( self) -> Dict:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2)
__snake_case = DummyModel()
__snake_case = torch.optim.Adam(params=model.parameters() , lr=1e-3)
__snake_case , __snake_case = dummy_dataloaders()
# Train baseline
__snake_case = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_)
# Save initial
__snake_case = os.path.join(lowercase_ , 'initial')
accelerator.save_state(lowercase_)
((__snake_case) , (__snake_case)) = model.a.item(), model.b.item()
__snake_case = optimizer.state_dict()
__snake_case = train(3 , lowercase_ , lowercase_ , lowercase_ , lowercase_)
((__snake_case) , (__snake_case)) = model.a.item(), model.b.item()
__snake_case = optimizer.state_dict()
# Train partially
set_seed(4_2)
__snake_case = DummyModel()
__snake_case = torch.optim.Adam(params=model.parameters() , lr=1e-3)
__snake_case , __snake_case = dummy_dataloaders()
__snake_case = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_)
accelerator.load_state(lowercase_)
((__snake_case) , (__snake_case)) = model.a.item(), model.b.item()
__snake_case = optimizer.state_dict()
self.assertEqual(lowercase_ , lowercase_)
self.assertEqual(lowercase_ , lowercase_)
self.assertEqual(lowercase_ , lowercase_)
__snake_case = train(2 , lowercase_ , lowercase_ , lowercase_ , lowercase_)
# Save everything
__snake_case = os.path.join(lowercase_ , 'checkpoint')
accelerator.save_state(lowercase_)
# Load everything back in and make sure all states work
accelerator.load_state(lowercase_)
test_rands += train(1 , lowercase_ , lowercase_ , lowercase_ , lowercase_)
((__snake_case) , (__snake_case)) = model.a.item(), model.b.item()
__snake_case = optimizer.state_dict()
self.assertEqual(lowercase_ , lowercase_)
self.assertEqual(lowercase_ , lowercase_)
self.assertEqual(lowercase_ , lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def _a ( self) -> Union[str, Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2)
__snake_case = DummyModel()
__snake_case = torch.optim.Adam(params=model.parameters() , lr=1e-3)
__snake_case , __snake_case = dummy_dataloaders()
__snake_case = ProjectConfiguration(automatic_checkpoint_naming=lowercase_)
# Train baseline
__snake_case = Accelerator(project_dir=lowercase_ , project_config=lowercase_)
__snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_)
# Save initial
accelerator.save_state()
((__snake_case) , (__snake_case)) = model.a.item(), model.b.item()
__snake_case = optimizer.state_dict()
__snake_case = train(3 , lowercase_ , lowercase_ , lowercase_ , lowercase_)
((__snake_case) , (__snake_case)) = model.a.item(), model.b.item()
__snake_case = optimizer.state_dict()
# Train partially
set_seed(4_2)
__snake_case = DummyModel()
__snake_case = torch.optim.Adam(params=model.parameters() , lr=1e-3)
__snake_case , __snake_case = dummy_dataloaders()
__snake_case = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=lowercase_)
__snake_case = Accelerator(project_dir=lowercase_ , project_config=lowercase_)
__snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_)
accelerator.load_state(os.path.join(lowercase_ , 'checkpoints' , 'checkpoint_0'))
((__snake_case) , (__snake_case)) = model.a.item(), model.b.item()
__snake_case = optimizer.state_dict()
self.assertEqual(lowercase_ , lowercase_)
self.assertEqual(lowercase_ , lowercase_)
self.assertEqual(lowercase_ , lowercase_)
__snake_case = train(2 , lowercase_ , lowercase_ , lowercase_ , lowercase_)
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowercase_ , 'checkpoints' , 'checkpoint_1'))
test_rands += train(1 , lowercase_ , lowercase_ , lowercase_ , lowercase_)
((__snake_case) , (__snake_case)) = model.a.item(), model.b.item()
__snake_case = optimizer.state_dict()
self.assertEqual(lowercase_ , lowercase_)
self.assertEqual(lowercase_ , lowercase_)
self.assertEqual(lowercase_ , lowercase_)
self.assertEqual(lowercase_ , lowercase_)
def _a ( self) -> Union[str, Any]:
__snake_case = torch.tensor([1, 2, 3])
__snake_case = torch.tensor([2, 3, 4])
__snake_case = DummyModel()
__snake_case = torch.optim.Adam(net.parameters())
__snake_case = Accelerator()
with self.assertRaises(lowercase_) as ve:
accelerator.register_for_checkpointing(lowercase_ , lowercase_ , lowercase_ , lowercase_)
__snake_case = str(ve.exception)
self.assertTrue('Item at index 0' in message)
self.assertTrue('Item at index 1' in message)
self.assertFalse('Item at index 2' in message)
self.assertFalse('Item at index 3' in message)
def _a ( self) -> Optional[Any]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2)
__snake_case = DummyModel()
__snake_case = torch.optim.Adam(params=model.parameters() , lr=1e-3)
__snake_case = torch.optim.lr_scheduler.StepLR(lowercase_ , step_size=1 , gamma=0.99)
__snake_case , __snake_case = dummy_dataloaders()
__snake_case = ProjectConfiguration(automatic_checkpoint_naming=lowercase_)
# Train baseline
__snake_case = Accelerator(project_dir=lowercase_ , project_config=lowercase_)
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case = accelerator.prepare(
lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_)
# Save initial
accelerator.save_state()
__snake_case = scheduler.state_dict()
train(3 , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_)
self.assertNotEqual(lowercase_ , scheduler.state_dict())
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(lowercase_ , 'checkpoints' , 'checkpoint_0'))
self.assertEqual(lowercase_ , scheduler.state_dict())
def _a ( self) -> Optional[int]:
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(4_2)
__snake_case = DummyModel()
__snake_case = ProjectConfiguration(automatic_checkpoint_naming=lowercase_ , total_limit=2)
# Train baseline
__snake_case = Accelerator(project_dir=lowercase_ , project_config=lowercase_)
__snake_case = accelerator.prepare(lowercase_)
# Save 3 states:
for _ in range(1_1):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(lowercase_ , 'checkpoints' , 'checkpoint_0')))
self.assertTrue(os.path.exists(os.path.join(lowercase_ , 'checkpoints' , 'checkpoint_9')))
self.assertTrue(os.path.exists(os.path.join(lowercase_ , 'checkpoints' , 'checkpoint_10')))
@require_cuda
def _a ( self) -> int:
__snake_case = ['torchrun', F"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
execute_subprocess_async(lowercase_ , env=os.environ.copy())
if __name__ == "__main__":
UpperCAmelCase__ : Union[str, Any] = "/tmp/accelerate/state_checkpointing"
UpperCAmelCase__ : List[Any] = DummyModel()
UpperCAmelCase__ : Optional[Any] = torch.optim.Adam(params=model.parameters(), lr=1e-3)
UpperCAmelCase__ : Optional[Any] = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
UpperCAmelCase__ , UpperCAmelCase__ : Dict = dummy_dataloaders()
UpperCAmelCase__ : List[Any] = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
UpperCAmelCase__ : List[str] = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
UpperCAmelCase__ , UpperCAmelCase__ : Any = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
UpperCAmelCase__ : str = group["params"][0].device
break
assert param_device.type == accelerator.device.type
UpperCAmelCase__ : Optional[int] = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
for group in optimizer.param_groups:
UpperCAmelCase__ : Optional[Any] = group["params"][0].device
break
assert (
param_device.type == torch.device("cpu").type
), F"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
for group in optimizer.param_groups:
UpperCAmelCase__ : Any = group["params"][0].device
break
assert (
param_device.type == accelerator.device.type
), F"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(""">=""", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
__a : Dict = get_logger(__name__)
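# Save the (possibly FSDP-wrapped) model's state dict, honoring the configured StateDictType:
# a single full checkpoint written by rank 0, one local shard per rank, or a sharded
# distributed-checkpoint directory.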
def UpperCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase=0 ):
"""simple docstring"""
os.makedirs(lowercase , exist_ok=lowercase )
with FSDP.state_dict_type(
lowercase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__lowercase = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__lowercase = F"{MODEL_NAME}.bin" if model_index == 0 else F"{MODEL_NAME}_{model_index}.bin"
__lowercase = os.path.join(lowercase , lowercase )
if accelerator.process_index == 0:
logger.info(F"Saving model to {output_model_file}" )
torch.save(lowercase , lowercase )
logger.info(F"Model saved to {output_model_file}" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__lowercase = (
F"{MODEL_NAME}_rank{accelerator.process_index}.bin"
if model_index == 0
else F"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
)
__lowercase = os.path.join(lowercase , lowercase )
logger.info(F"Saving model to {output_model_file}" )
torch.save(lowercase , lowercase )
logger.info(F"Model saved to {output_model_file}" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__lowercase = os.path.join(lowercase , F"{MODEL_NAME}_{model_index}" )
os.makedirs(lowercase , exist_ok=lowercase )
logger.info(F"Saving model to {ckpt_dir}" )
__lowercase = {'''model''': state_dict}
dist_cp.save_state_dict(
state_dict=lowercase , storage_writer=dist_cp.FileSystemWriter(lowercase ) , planner=DefaultSavePlanner() , )
logger.info(F"Model saved to {ckpt_dir}" )
def UpperCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase=0 ):
"""simple docstring"""
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
lowercase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(lowercase ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'''Set the `sync_module_states` flag to `True` so that model states are synced across processes when '''
'''initializing FSDP object''' )
return
__lowercase = F"{MODEL_NAME}.bin" if model_index == 0 else F"{MODEL_NAME}_{model_index}.bin"
__lowercase = os.path.join(lowercase , lowercase )
logger.info(F"Loading model from {input_model_file}" )
__lowercase = torch.load(lowercase )
logger.info(F"Model loaded from {input_model_file}" )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
__lowercase = (
F"{MODEL_NAME}_rank{accelerator.process_index}.bin"
if model_index == 0
else F"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
)
__lowercase = os.path.join(lowercase , lowercase )
logger.info(F"Loading model from {input_model_file}" )
__lowercase = torch.load(lowercase )
logger.info(F"Model loaded from {input_model_file}" )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
__lowercase = (
os.path.join(lowercase , F"{MODEL_NAME}_{model_index}" )
if F"{MODEL_NAME}" not in input_dir
else input_dir
)
logger.info(F"Loading model from {ckpt_dir}" )
__lowercase = {'''model''': model.state_dict()}
dist_cp.load_state_dict(
state_dict=lowercase , storage_reader=dist_cp.FileSystemReader(lowercase ) , planner=DefaultLoadPlanner() , )
__lowercase = state_dict['''model''']
logger.info(F"Model loaded from {ckpt_dir}" )
model.load_state_dict(lowercase )
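# Save the optimizer state gathered through FSDP, either as a single file written by rank 0
# (full state dict) or as a sharded distributed-checkpoint directory.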
def UpperCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=0 ):
"""simple docstring"""
os.makedirs(lowercase , exist_ok=lowercase )
with FSDP.state_dict_type(
lowercase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
__lowercase = FSDP.optim_state_dict(lowercase , lowercase )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
__lowercase = (
F"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else F"{OPTIMIZER_NAME}_{optimizer_index}.bin"
)
__lowercase = os.path.join(lowercase , lowercase )
logger.info(F"Saving Optimizer state to {output_optimizer_file}" )
torch.save(lowercase , lowercase )
logger.info(F"Optimizer state saved in {output_optimizer_file}" )
else:
__lowercase = os.path.join(lowercase , F"{OPTIMIZER_NAME}_{optimizer_index}" )
os.makedirs(lowercase , exist_ok=lowercase )
logger.info(F"Saving Optimizer state to {ckpt_dir}" )
dist_cp.save_state_dict(
state_dict={'''optimizer''': optim_state} , storage_writer=dist_cp.FileSystemWriter(lowercase ) , planner=DefaultSavePlanner() , )
logger.info(F"Optimizer state saved in {ckpt_dir}" )
def UpperCAmelCase ( lowercase , lowercase , lowercase , lowercase , lowercase , lowercase=0 ):
"""simple docstring"""
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
lowercase , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
__lowercase = None
            # the check below should work but currently it isn't working (mostly a pytorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
__lowercase = (
F"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else F"{OPTIMIZER_NAME}_{optimizer_index}.bin"
)
__lowercase = os.path.join(lowercase , lowercase )
logger.info(F"Loading Optimizer state from {input_optimizer_file}" )
__lowercase = torch.load(lowercase )
logger.info(F"Optimizer state loaded from {input_optimizer_file}" )
else:
__lowercase = (
os.path.join(lowercase , F"{OPTIMIZER_NAME}_{optimizer_index}" )
if F"{OPTIMIZER_NAME}" not in input_dir
else input_dir
)
logger.info(F"Loading Optimizer from {ckpt_dir}" )
__lowercase = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key='''optimizer''' , storage_reader=dist_cp.FileSystemReader(lowercase ) , )
__lowercase = optim_state['''optimizer''']
logger.info(F"Optimizer loaded from {ckpt_dir}" )
__lowercase = FSDP.optim_state_dict_to_load(lowercase , lowercase , lowercase )
        optimizer.load_state_dict(lowercase )
import random
from typing import Any
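# Shuffle `data` in place by swapping the values at two randomly chosen indices, once per element
# (a simplified Fisher-Yates-style shuffle), and return the list.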
def UpperCAmelCase ( lowercase ):
"""simple docstring"""
for _ in range(len(lowercase ) ):
__lowercase = random.randint(0 , len(lowercase ) - 1 )
__lowercase = random.randint(0 , len(lowercase ) - 1 )
__lowercase , __lowercase = data[b], data[a]
return data
if __name__ == "__main__":
__a : List[str] = [0, 1, 2, 3, 4, 5, 6, 7]
__a : Any = ["""python""", """says""", """hello""", """!"""]
print("""Fisher-Yates Shuffle:""")
print("""List""", integers, strings)
print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) | 522 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class snake_case__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __magic_name__ ( self ) -> str:
__magic_name__ : Optional[Any] = 1
__magic_name__ : Dict = 3
__magic_name__ : str = (32, 32)
__magic_name__ : List[Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(lowerCAmelCase__ )
return image
@property
def __magic_name__ ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
__magic_name__ : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
return model
@property
def __magic_name__ ( self ) -> Tuple:
torch.manual_seed(0 )
__magic_name__ : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def __magic_name__ ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
__magic_name__ : Optional[Any] = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , )
return RobertaSeriesModelWithTransformation(lowerCAmelCase__ )
@property
def __magic_name__ ( self ) -> List[str]:
def extract(*lowerCAmelCase__ , **lowerCAmelCase__ ):
class snake_case__ :
def __init__( self ) -> List[Any]:
__magic_name__ : Tuple = torch.ones([0] )
def __magic_name__ ( self , lowerCAmelCase__ ) -> Optional[int]:
self.pixel_values.to(lowerCAmelCase__ )
return self
return Out()
return extract
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : Dict = """cpu""" # ensure determinism for the device-dependent torch.Generator
__magic_name__ : Optional[int] = self.dummy_cond_unet
__magic_name__ : Optional[int] = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
__magic_name__ : Union[str, Any] = self.dummy_vae
__magic_name__ : Tuple = self.dummy_text_encoder
__magic_name__ : List[str] = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
__magic_name__ : Any = 77
__magic_name__ : int = self.dummy_image.to(lowerCAmelCase__ )
__magic_name__ : int = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
__magic_name__ : Union[str, Any] = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
__magic_name__ : Union[str, Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
__magic_name__ : List[str] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__magic_name__ : str = """A painting of a squirrel eating a burger"""
__magic_name__ : Dict = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
__magic_name__ : Any = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=lowerCAmelCase__ , )
__magic_name__ : Union[str, Any] = output.images
__magic_name__ : int = torch.Generator(device=lowerCAmelCase__ ).manual_seed(0 )
__magic_name__ : List[Any] = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , guidance_scale=6.0 , num_inference_steps=2 , output_type="""np""" , image=lowerCAmelCase__ , return_dict=lowerCAmelCase__ , )[0]
__magic_name__ : Optional[Any] = image[0, -3:, -3:, -1]
__magic_name__ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
__magic_name__ : Dict = np.array([0.4_4_2_7, 0.3_7_3_1, 0.4_2_4_9, 0.4_9_4_1, 0.4_5_4_6, 0.4_1_4_8, 0.4_1_9_3, 0.4_6_6_6, 0.4_4_9_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def __magic_name__ ( self ) -> Optional[Any]:
__magic_name__ : Tuple = self.dummy_cond_unet
__magic_name__ : Dict = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ )
__magic_name__ : Optional[Any] = self.dummy_vae
__magic_name__ : Any = self.dummy_text_encoder
__magic_name__ : Dict = XLMRobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-xlm-roberta""" )
__magic_name__ : str = 77
__magic_name__ : Dict = self.dummy_image.to(lowerCAmelCase__ )
# put models in fp16
__magic_name__ : Union[str, Any] = unet.half()
__magic_name__ : Optional[Any] = vae.half()
__magic_name__ : Optional[int] = bert.half()
# make sure here that pndm scheduler skips prk
__magic_name__ : Optional[Any] = AltDiffusionImgaImgPipeline(
unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=self.dummy_extractor , )
__magic_name__ : List[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=lowerCAmelCase__ )
__magic_name__ : List[Any] = alt_pipe.to(lowerCAmelCase__ )
alt_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
__magic_name__ : List[str] = """A painting of a squirrel eating a burger"""
__magic_name__ : Tuple = torch.manual_seed(0 )
__magic_name__ : int = alt_pipe(
[prompt] , generator=lowerCAmelCase__ , num_inference_steps=2 , output_type="""np""" , image=lowerCAmelCase__ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def __magic_name__ ( self ) -> str:
__magic_name__ : str = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
# resize to resolution that is divisible by 8 but not 16 or 32
__magic_name__ : Tuple = init_image.resize((7_60, 5_04) )
__magic_name__ : List[str] = """BAAI/AltDiffusion"""
__magic_name__ : Any = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__magic_name__ : Union[str, Any] = """A fantasy landscape, trending on artstation"""
__magic_name__ : List[str] = torch.manual_seed(0 )
__magic_name__ : Optional[Any] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="""np""" , )
__magic_name__ : int = output.images[0]
__magic_name__ : Union[str, Any] = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
__magic_name__ : int = np.array([0.9_3_5_8, 0.9_3_9_7, 0.9_5_9_9, 0.9_9_0_1, 1.0_0_0_0, 1.0_0_0_0, 0.9_8_8_2, 1.0_0_0_0, 1.0_0_0_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
def __magic_name__ ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self ) -> List[Any]:
__magic_name__ : int = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/img2img/sketch-mountains-input.jpg""" )
__magic_name__ : List[Any] = init_image.resize((7_68, 5_12) )
__magic_name__ : List[str] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy""" )
__magic_name__ : str = """BAAI/AltDiffusion"""
__magic_name__ : Any = AltDiffusionImgaImgPipeline.from_pretrained(
lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
pipe.to(lowerCAmelCase__ )
pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
pipe.enable_attention_slicing()
__magic_name__ : Any = """A fantasy landscape, trending on artstation"""
__magic_name__ : Optional[int] = torch.manual_seed(0 )
__magic_name__ : Optional[Any] = pipe(
prompt=lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.7_5 , guidance_scale=7.5 , generator=lowerCAmelCase__ , output_type="""np""" , )
__magic_name__ : List[Any] = output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2
from __future__ import annotations
from functools import lru_cache
from math import ceil
__magic_name__: Tuple = 100
__magic_name__: Any = set(range(3, NUM_PRIMES, 2))
primes.add(2)
__magic_name__: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
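# partition(n): set of distinct products obtainable from multisets of primes (below NUM_PRIMES) that sum to n;
# solution(k) returns the smallest n producing more than k distinct products (memoized with lru_cache).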
@lru_cache(maxsize=100 )
def UpperCamelCase ( _A ):
"""simple docstring"""
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
__magic_name__ : set[int] = set()
__magic_name__ : int
__magic_name__ : int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def UpperCamelCase ( _A = 5000 ):
"""simple docstring"""
for number_to_partition in range(1, _A ):
if len(partition(_A ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(F"""{solution() = }""")
from arguments import InitializationArguments
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser
# Configuration
lowerCamelCase_ = HfArgumentParser(InitializationArguments)
lowerCamelCase_ = parser.parse_args()
# Load codeparrot tokenizer trained for Python code tokenization
lowerCamelCase_ = AutoTokenizer.from_pretrained(args.tokenizer_name)
# Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks
lowerCamelCase_ = {
'''vocab_size''': len(tokenizer),
'''scale_attn_by_inverse_layer_idx''': True,
'''reorder_and_upcast_attn''': True,
}
# Load model config (GPT-2 large in this case)
lowerCamelCase_ = AutoConfig.from_pretrained(args.config_name, **config_kwargs)
# Initialize new model with config
lowerCamelCase_ = AutoModelForCausalLM.from_config(config)
# Save model to the hub
model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
lowerCamelCase_ = {
'''distilbert''': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'''roberta''': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'''bert''': (BertConfig, BertForMaskedLM, BertTokenizer),
'''gpt2''': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
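# Validate the distillation arguments: exactly one of the MLM / CLM objectives is active, the
# student/teacher architectures are compatible, required files exist, and all loss weights are
# non-negative with a positive sum.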
def __magic_name__ ( __a : Any ):
'''simple docstring'''
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def __magic_name__ ( __a : List[Any] , __a : Any ):
'''simple docstring'''
if args.student_type == "roberta":
UpperCamelCase__ = False
elif args.student_type == "gpt2":
UpperCamelCase__ = False
def __magic_name__ ( __a : int , __a : Dict ):
'''simple docstring'''
if args.student_type == "roberta":
UpperCamelCase__ = False
def __magic_name__ ( ):
'''simple docstring'''
UpperCamelCase__ = argparse.ArgumentParser(description="""Training""" )
parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""" )
parser.add_argument(
"""--dump_path""" , type=__a , required=__a , help="""The output directory (log, checkpoints, parameters, etc.)""" )
parser.add_argument(
"""--data_file""" , type=__a , required=__a , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , )
parser.add_argument(
"""--student_type""" , type=__a , choices=["""distilbert""", """roberta""", """gpt2"""] , required=__a , help="""The student type (DistilBERT, RoBERTa).""" , )
parser.add_argument("""--student_config""" , type=__a , required=__a , help="""Path to the student configuration.""" )
parser.add_argument(
"""--student_pretrained_weights""" , default=__a , type=__a , help="""Load student initialization checkpoint.""" )
parser.add_argument(
"""--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=__a , help="""Teacher type (BERT, RoBERTa).""" )
parser.add_argument("""--teacher_name""" , type=__a , required=__a , help="""The teacher model.""" )
parser.add_argument("""--temperature""" , default=2.0 , type=__a , help="""Temperature for the softmax temperature.""" )
parser.add_argument(
"""--alpha_ce""" , default=0.5 , type=__a , help="""Linear weight for the distillation loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_mlm""" , default=0.0 , type=__a , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , )
parser.add_argument("""--alpha_clm""" , default=0.5 , type=__a , help="""Linear weight for the CLM loss. Must be >=0.""" )
parser.add_argument("""--alpha_mse""" , default=0.0 , type=__a , help="""Linear weight of the MSE loss. Must be >=0.""" )
parser.add_argument(
"""--alpha_cos""" , default=0.0 , type=__a , help="""Linear weight of the cosine embedding loss. Must be >=0.""" )
parser.add_argument(
"""--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""" )
parser.add_argument(
"""--mlm_mask_prop""" , default=0.15 , type=__a , help="""Proportion of tokens for which we need to make a prediction.""" , )
parser.add_argument("""--word_mask""" , default=0.8 , type=__a , help="""Proportion of tokens to mask out.""" )
parser.add_argument("""--word_keep""" , default=0.1 , type=__a , help="""Proportion of tokens to keep.""" )
parser.add_argument("""--word_rand""" , default=0.1 , type=__a , help="""Proportion of tokens to randomly replace.""" )
parser.add_argument(
"""--mlm_smoothing""" , default=0.7 , type=__a , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , )
parser.add_argument("""--token_counts""" , type=__a , help="""The token counts in the data_file for MLM.""" )
parser.add_argument(
"""--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , )
parser.add_argument(
"""--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , )
parser.add_argument(
"""--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , )
parser.add_argument("""--n_epoch""" , type=__a , default=3 , help="""Number of pass on the whole dataset.""" )
parser.add_argument("""--batch_size""" , type=__a , default=5 , help="""Batch size (for each process).""" )
parser.add_argument(
"""--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. Default is true.""" , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=__a , default=50 , help="""Gradient accumulation for larger training batches.""" , )
parser.add_argument("""--warmup_prop""" , default=0.05 , type=__a , help="""Linear warmup proportion.""" )
parser.add_argument("""--weight_decay""" , default=0.0 , type=__a , help="""Weight decay if we apply some.""" )
parser.add_argument("""--learning_rate""" , default=5E-4 , type=__a , help="""The initial learning rate for Adam.""" )
parser.add_argument("""--adam_epsilon""" , default=1E-6 , type=__a , help="""Epsilon for Adam optimizer.""" )
parser.add_argument("""--max_grad_norm""" , default=5.0 , type=__a , help="""Max gradient norm.""" )
parser.add_argument("""--initializer_range""" , default=0.02 , type=__a , help="""Random initialization range.""" )
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=__a , default="""O1""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_gpu""" , type=__a , default=1 , help="""Number of GPUs in the node.""" )
parser.add_argument("""--local_rank""" , type=__a , default=-1 , help="""Distributed training - Local rank""" )
parser.add_argument("""--seed""" , type=__a , default=56 , help="""Random seed""" )
parser.add_argument("""--log_interval""" , type=__a , default=500 , help="""Tensorboard logging interval.""" )
parser.add_argument("""--checkpoint_interval""" , type=__a , default=4_000 , help="""Checkpoint interval.""" )
UpperCamelCase__ = parser.parse_args()
sanity_checks(__a )
# ARGS #
init_gpu_params(__a )
set_seed(__a )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
f"Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite"
""" itUse `--force` if you want to overwrite it""" )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f"Experiment will be dumped and logged in {args.dump_path}" )
# SAVE PARAMS #
logger.info(f"Param: {args}" )
with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f:
json.dump(vars(__a ) , __a , indent=4 )
git_log(args.dump_path )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = MODEL_CLASSES[args.student_type]
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
UpperCamelCase__ = teacher_tokenizer_class.from_pretrained(args.teacher_name )
UpperCamelCase__ = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
UpperCamelCase__ = tokenizer.all_special_tokens.index(__a )
UpperCamelCase__ = tokenizer.all_special_ids[idx]
logger.info(f"Special tokens {special_tok_ids}" )
UpperCamelCase__ = special_tok_ids
UpperCamelCase__ = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f"Loading data from {args.data_file}" )
with open(args.data_file , """rb""" ) as fp:
UpperCamelCase__ = pickle.load(__a )
if args.mlm:
logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)" )
with open(args.token_counts , """rb""" ) as fp:
UpperCamelCase__ = pickle.load(__a )
UpperCamelCase__ = np.maximum(__a , 1 ) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
UpperCamelCase__ = 0.0 # do not predict special tokens
UpperCamelCase__ = torch.from_numpy(__a )
else:
UpperCamelCase__ = None
UpperCamelCase__ = LmSeqsDataset(params=__a , data=__a )
logger.info("""Data loader created.""" )
# STUDENT #
logger.info(f"Loading student config from {args.student_config}" )
UpperCamelCase__ = student_config_class.from_pretrained(args.student_config )
UpperCamelCase__ = True
if args.student_pretrained_weights is not None:
logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}" )
UpperCamelCase__ = student_model_class.from_pretrained(args.student_pretrained_weights , config=__a )
else:
UpperCamelCase__ = student_model_class(__a )
if args.n_gpu > 0:
student.to(f"cuda:{args.local_rank}" )
logger.info("""Student loaded.""" )
# TEACHER #
UpperCamelCase__ = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=__a )
if args.n_gpu > 0:
teacher.to(f"cuda:{args.local_rank}" )
logger.info(f"Teacher loaded from {args.teacher_name}." )
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(__a , __a )
if args.freeze_token_type_embds:
freeze_token_type_embeddings(__a , __a )
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
UpperCamelCase__ = Distiller(
params=__a , dataset=__a , token_probs=__a , student=__a , teacher=__a )
distiller.train()
logger.info("""Let's go get some drinks.""" )
if __name__ == "__main__":
main()
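
# Added usage note: a minimal MLM distillation run built only from the flags defined
# above might look like this (file paths are placeholders and the hyper-parameters are
# illustrative, not the reference recipe):
#   python train.py \
#     --student_type distilbert --student_config student_config.json \
#     --teacher_type bert --teacher_name bert-base-uncased \
#     --mlm --alpha_ce 5.0 --alpha_mlm 2.0 --alpha_clm 0.0 --alpha_cos 1.0 \
#     --data_file data/binarized_text.pickle --token_counts data/token_counts.pickle \
#     --dump_path serialization_dir/my_first_distillation --force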
| 86 | 0 |
from math import factorial
DIGIT_FACTORIAL: dict[str, int] = {str(digit): factorial(digit) for digit in range(10)}


def digit_factorial_sum(number: int) -> int:
    """Return the sum of the factorials of the digits of `number`."""
    if not isinstance(number, int):
        raise TypeError("Parameter number must be int")

    if number < 0:
        raise ValueError("Parameter number must be greater than or equal to 0")

    # Converts number in string to iterate on its digits and adds its factorial.
    return sum(DIGIT_FACTORIAL[digit] for digit in str(number))


def solution(chain_length: int = 60, number_limit: int = 1_000_000) -> int:
    """Return how many chains, with a starting number below `number_limit`, contain
    exactly `chain_length` non-repeating terms."""
    if not isinstance(chain_length, int) or not isinstance(number_limit, int):
        raise TypeError("Parameters chain_length and number_limit must be int")

    if chain_length <= 0 or number_limit <= 0:
        raise ValueError(
            "Parameters chain_length and number_limit must be greater than 0")

    # the counter for the chains with the exact desired length
    chains_counter = 0
    # the cached sizes of the previous chains
    chain_sets_lengths = {}

    for start_chain_element in range(1, number_limit):
        # The temporary set will contain the elements of the chain
        chain_set = set()
        chain_set_length = 0

        # Stop computing the chain when you find a cached size, a repeating item or the
        # length is greater than the desired one.
        chain_element = start_chain_element
        while (
            chain_element not in chain_sets_lengths
            and chain_element not in chain_set
            and chain_set_length <= chain_length
        ):
            chain_set.add(chain_element)
            chain_set_length += 1
            chain_element = digit_factorial_sum(chain_element)

        if chain_element in chain_sets_lengths:
            chain_set_length += chain_sets_lengths[chain_element]

        chain_sets_lengths[start_chain_element] = chain_set_length

        # If chain contains the exact amount of elements increase the counter
        if chain_set_length == chain_length:
            chains_counter += 1

    return chains_counter

if __name__ == "__main__":
import doctest
doctest.testmod()
print(f'''{solution()}''')
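
    # Added illustrative checks from the problem statement (not part of the original
    # solution): 145 maps to itself, and 169 -> 363601 -> 1454 -> 169 forms a loop of
    # three terms.
    assert digit_factorial_sum(145) == 145
    assert digit_factorial_sum(169) == 363601
    assert digit_factorial_sum(363601) == 1454
    assert digit_factorial_sum(1454) == 169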
| 55 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = OrderedDict(
[
('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
('beit', 'BeitFeatureExtractor'),
('chinese_clip', 'ChineseCLIPFeatureExtractor'),
('clap', 'ClapFeatureExtractor'),
('clip', 'CLIPFeatureExtractor'),
('clipseg', 'ViTFeatureExtractor'),
('conditional_detr', 'ConditionalDetrFeatureExtractor'),
('convnext', 'ConvNextFeatureExtractor'),
('cvt', 'ConvNextFeatureExtractor'),
('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
('data2vec-vision', 'BeitFeatureExtractor'),
('deformable_detr', 'DeformableDetrFeatureExtractor'),
('deit', 'DeiTFeatureExtractor'),
('detr', 'DetrFeatureExtractor'),
('dinat', 'ViTFeatureExtractor'),
('donut-swin', 'DonutFeatureExtractor'),
('dpt', 'DPTFeatureExtractor'),
('encodec', 'EncodecFeatureExtractor'),
('flava', 'FlavaFeatureExtractor'),
('glpn', 'GLPNFeatureExtractor'),
('groupvit', 'CLIPFeatureExtractor'),
('hubert', 'Wav2Vec2FeatureExtractor'),
('imagegpt', 'ImageGPTFeatureExtractor'),
('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
('levit', 'LevitFeatureExtractor'),
('maskformer', 'MaskFormerFeatureExtractor'),
('mctct', 'MCTCTFeatureExtractor'),
('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
('mobilevit', 'MobileViTFeatureExtractor'),
('nat', 'ViTFeatureExtractor'),
('owlvit', 'OwlViTFeatureExtractor'),
('perceiver', 'PerceiverFeatureExtractor'),
('poolformer', 'PoolFormerFeatureExtractor'),
('regnet', 'ConvNextFeatureExtractor'),
('resnet', 'ConvNextFeatureExtractor'),
('segformer', 'SegformerFeatureExtractor'),
('sew', 'Wav2Vec2FeatureExtractor'),
('sew-d', 'Wav2Vec2FeatureExtractor'),
('speech_to_text', 'Speech2TextFeatureExtractor'),
('speecht5', 'SpeechT5FeatureExtractor'),
('swiftformer', 'ViTFeatureExtractor'),
('swin', 'ViTFeatureExtractor'),
('swinv2', 'ViTFeatureExtractor'),
('table-transformer', 'DetrFeatureExtractor'),
('timesformer', 'VideoMAEFeatureExtractor'),
('tvlt', 'TvltFeatureExtractor'),
('unispeech', 'Wav2Vec2FeatureExtractor'),
('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
('van', 'ConvNextFeatureExtractor'),
('videomae', 'VideoMAEFeatureExtractor'),
('vilt', 'ViltFeatureExtractor'),
('vit', 'ViTFeatureExtractor'),
('vit_mae', 'ViTFeatureExtractor'),
('vit_msn', 'ViTFeatureExtractor'),
('wav2vec2', 'Wav2Vec2FeatureExtractor'),
('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
('wavlm', 'Wav2Vec2FeatureExtractor'),
('whisper', 'WhisperFeatureExtractor'),
('xclip', 'CLIPFeatureExtractor'),
('yolos', 'YolosFeatureExtractor'),
]
)
__UpperCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def lowercase__ ( __snake_case : str ):
'''simple docstring'''
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
UpperCAmelCase_ : Union[str, Any] = model_type_to_module_name(__snake_case )
UpperCAmelCase_ : Union[str, Any] = importlib.import_module(F".{module_name}" , 'transformers.models' )
try:
return getattr(__snake_case , __snake_case )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(__snake_case , '__name__' , __snake_case ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
UpperCAmelCase_ : List[Any] = importlib.import_module('transformers' )
if hasattr(__snake_case , __snake_case ):
return getattr(__snake_case , __snake_case )
return None
def lowercase__ ( __snake_case : Union[str, os.PathLike] , __snake_case : Optional[Union[str, os.PathLike]] = None , __snake_case : bool = False , __snake_case : bool = False , __snake_case : Optional[Dict[str, str]] = None , __snake_case : Optional[Union[bool, str]] = None , __snake_case : Optional[str] = None , __snake_case : bool = False , **__snake_case : Tuple , ):
'''simple docstring'''
UpperCAmelCase_ : List[Any] = get_file_from_repo(
__snake_case , __snake_case , cache_dir=__snake_case , force_download=__snake_case , resume_download=__snake_case , proxies=__snake_case , use_auth_token=__snake_case , revision=__snake_case , local_files_only=__snake_case , )
if resolved_config_file is None:
logger.info(
'Could not locate the feature extractor configuration file, will try to use the model config instead.' )
return {}
with open(__snake_case , encoding='utf-8' ) as reader:
return json.load(__snake_case )
class lowerCamelCase :
'''simple docstring'''
def __init__( self ) -> List[Any]:
raise EnvironmentError(
'AutoFeatureExtractor is designed to be instantiated '
'using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(_UpperCamelCase )
def __UpperCAmelCase ( cls , _UpperCamelCase , **_UpperCamelCase ) -> List[Any]:
UpperCAmelCase_ : List[str] = kwargs.pop('config' , _UpperCamelCase )
UpperCAmelCase_ : Dict = kwargs.pop('trust_remote_code' , _UpperCamelCase )
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = FeatureExtractionMixin.get_feature_extractor_dict(_UpperCamelCase , **_UpperCamelCase )
UpperCAmelCase_ : str = config_dict.get('feature_extractor_type' , _UpperCamelCase )
UpperCAmelCase_ : Dict = None
if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
UpperCAmelCase_ : Tuple = config_dict['auto_map']['AutoFeatureExtractor']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(_UpperCamelCase , _UpperCamelCase ):
UpperCAmelCase_ : Dict = AutoConfig.from_pretrained(_UpperCamelCase , **_UpperCamelCase )
# It could be in `config.feature_extractor_type``
UpperCAmelCase_ : Tuple = getattr(_UpperCamelCase , 'feature_extractor_type' , _UpperCamelCase )
if hasattr(_UpperCamelCase , 'auto_map' ) and "AutoFeatureExtractor" in config.auto_map:
UpperCAmelCase_ : Optional[int] = config.auto_map['AutoFeatureExtractor']
if feature_extractor_class is not None:
UpperCAmelCase_ : Dict = feature_extractor_class_from_name(_UpperCamelCase )
UpperCAmelCase_ : str = feature_extractor_auto_map is not None
UpperCAmelCase_ : Dict = feature_extractor_class is not None or type(_UpperCamelCase ) in FEATURE_EXTRACTOR_MAPPING
UpperCAmelCase_ : Union[str, Any] = resolve_trust_remote_code(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if has_remote_code and trust_remote_code:
UpperCAmelCase_ : int = get_class_from_dynamic_module(
_UpperCamelCase , _UpperCamelCase , **_UpperCamelCase )
UpperCAmelCase_ : Any = kwargs.pop('code_revision' , _UpperCamelCase )
if os.path.isdir(_UpperCamelCase ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(_UpperCamelCase , **_UpperCamelCase )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(_UpperCamelCase , **_UpperCamelCase )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(_UpperCamelCase ) in FEATURE_EXTRACTOR_MAPPING:
UpperCAmelCase_ : List[Any] = FEATURE_EXTRACTOR_MAPPING[type(_UpperCamelCase )]
return feature_extractor_class.from_dict(_UpperCamelCase , **_UpperCamelCase )
raise ValueError(
f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}" )
@staticmethod
def __UpperCAmelCase ( _UpperCamelCase , _UpperCamelCase ) -> Tuple:
FEATURE_EXTRACTOR_MAPPING.register(_UpperCamelCase , _UpperCamelCase )
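

if __name__ == "__main__":
    # Added usage sketch (illustrative, not part of this module): this module backs the
    # public `transformers.AutoFeatureExtractor` entry point; the checkpoint id below is
    # only an example.
    from transformers import AutoFeatureExtractor

    extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
    print(type(extractor).__name__)  # e.g. Wav2Vec2FeatureExtractor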
| 406 | 0 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class _a ( lowercase__ ):
def __init__( self: Any , UpperCamelCase_: NestedDataStructureLike[PathLike] , UpperCamelCase_: Optional[NamedSplit] = None , UpperCamelCase_: Optional[Features] = None , UpperCamelCase_: str = None , UpperCamelCase_: bool = False , UpperCamelCase_: bool = False , UpperCamelCase_: Optional[str] = None , UpperCamelCase_: Optional[int] = None , **UpperCamelCase_: Tuple , ) -> Any:
"""simple docstring"""
super().__init__(
UpperCamelCase_ , split=UpperCamelCase_ , features=UpperCamelCase_ , cache_dir=UpperCamelCase_ , keep_in_memory=UpperCamelCase_ , streaming=UpperCamelCase_ , num_proc=UpperCamelCase_ , **UpperCamelCase_ , )
lowercase__ = field
lowercase__ = path_or_paths if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else {self.split: path_or_paths}
lowercase__ = Json(
cache_dir=UpperCamelCase_ , data_files=UpperCamelCase_ , features=UpperCamelCase_ , field=UpperCamelCase_ , **UpperCamelCase_ , )
def lowerCamelCase_ ( self: List[str] ) -> Any:
"""simple docstring"""
if self.streaming:
lowercase__ = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowercase__ = None
lowercase__ = None
lowercase__ = None
lowercase__ = None
self.builder.download_and_prepare(
download_config=UpperCamelCase_ , download_mode=UpperCamelCase_ , verification_mode=UpperCamelCase_ , base_path=UpperCamelCase_ , num_proc=self.num_proc , )
lowercase__ = self.builder.as_dataset(
split=self.split , verification_mode=UpperCamelCase_ , in_memory=self.keep_in_memory )
return dataset
class _a :
def __init__( self: Optional[Any] , UpperCamelCase_: Dataset , UpperCamelCase_: Union[PathLike, BinaryIO] , UpperCamelCase_: Optional[int] = None , UpperCamelCase_: Optional[int] = None , **UpperCamelCase_: Any , ) -> Any:
"""simple docstring"""
if num_proc is not None and num_proc <= 0:
raise ValueError(f'num_proc {num_proc} must be an integer > 0.' )
lowercase__ = dataset
lowercase__ = path_or_buf
lowercase__ = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
lowercase__ = num_proc
lowercase__ = "utf-8"
lowercase__ = to_json_kwargs
def lowerCamelCase_ ( self: Union[str, Any] ) -> int:
"""simple docstring"""
lowercase__ = self.to_json_kwargs.pop('''path_or_buf''' , UpperCamelCase_ )
lowercase__ = self.to_json_kwargs.pop('''orient''' , '''records''' )
lowercase__ = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False )
lowercase__ = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True )
lowercase__ = self.to_json_kwargs.pop('''compression''' , UpperCamelCase_ )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f'`datasets` currently does not support {compression} compression' )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , '''wb''' , compression=UpperCamelCase_ ) as buffer:
lowercase__ = self._write(file_obj=UpperCamelCase_ , orient=UpperCamelCase_ , lines=UpperCamelCase_ , index=UpperCamelCase_ , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f'The compression parameter is not supported when writing to a buffer, but compression={compression}'
''' was passed. Please provide a local path instead.''' )
lowercase__ = self._write(
file_obj=self.path_or_buf , orient=UpperCamelCase_ , lines=UpperCamelCase_ , index=UpperCamelCase_ , **self.to_json_kwargs )
return written
def lowerCamelCase_ ( self: List[Any] , UpperCamelCase_: int ) -> str:
"""simple docstring"""
lowercase__ = args
lowercase__ = query_table(
table=self.dataset.data , key=slice(UpperCamelCase_ , offset + self.batch_size ) , indices=self.dataset._indices , )
lowercase__ = batch.to_pandas().to_json(
path_or_buf=UpperCamelCase_ , orient=UpperCamelCase_ , lines=UpperCamelCase_ , index=UpperCamelCase_ , **UpperCamelCase_ )
if not json_str.endswith('''\n''' ):
json_str += "\n"
return json_str.encode(self.encoding )
def lowerCamelCase_ ( self: str , UpperCamelCase_: BinaryIO , UpperCamelCase_: Optional[Any] , UpperCamelCase_: List[Any] , UpperCamelCase_: str , **UpperCamelCase_: Optional[int] , ) -> int:
"""simple docstring"""
lowercase__ = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
lowercase__ = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(UpperCamelCase_ )
else:
lowercase__ = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , UpperCamelCase_ , UpperCamelCase_ )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
written += file_obj.write(UpperCamelCase_ )
return written
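

if __name__ == "__main__":
    # Added usage sketch (illustrative): these reader/writer classes back the public
    # `Dataset.from_json` / `Dataset.to_json` API; the file name is a placeholder.
    from datasets import Dataset

    ds = Dataset.from_dict({"text": ["hello", "world"], "label": [0, 1]})
    ds.to_json("example.jsonl", lines=True)
    print(Dataset.from_json("example.jsonl"))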
| 704 |
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
logger = logging.getLogger(__name__)


@dataclass
@add_start_docstrings(TrainingArguments.__doc__)
class Seq2SeqTrainingArguments(TrainingArguments):
    sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."})
    predict_with_generate: bool = field(
        default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
    )
    generation_max_length: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        },
    )
    generation_num_beams: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        },
    )
    generation_config: Optional[Union[str, Path, GenerationConfig]] = field(
        default=None,
        metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        },
    )

    def to_dict(self) -> dict:
        """Serialize to a dict, converting any nested `GenerationConfig` into a plain dict."""
        d = super().to_dict()
        for k, v in d.items():
            if isinstance(v, GenerationConfig):
                d[k] = v.to_dict()
        return d
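

if __name__ == "__main__":
    # Added usage sketch (values are illustrative): the class above is exposed as
    # `transformers.Seq2SeqTrainingArguments`.
    from transformers import Seq2SeqTrainingArguments

    args = Seq2SeqTrainingArguments(
        output_dir="tmp-seq2seq",
        predict_with_generate=True,
        generation_max_length=128,
        generation_num_beams=4,
    )
    print(args.to_dict()["generation_num_beams"])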
| 429 | 0 |
"""simple docstring"""
from typing import Any
class UpperCAmelCase :
"""simple docstring"""
def __init__( self , _UpperCAmelCase ):
lowercase__: List[str] = data
lowercase__: Optional[Any] = None
def __repr__( self ):
return F"""Node({self.data})"""
class UpperCAmelCase :
"""simple docstring"""
def __init__( self ):
lowercase__: Dict = None
def __iter__( self ):
lowercase__: int = self.head
while node:
yield node.data
lowercase__: List[Any] = node.next
def __len__( self ):
return sum(1 for _ in self )
def __repr__( self ):
return "->".join([str(_UpperCAmelCase ) for item in self] )
def __getitem__( self , _UpperCAmelCase ):
if not 0 <= index < len(self ):
raise ValueError('''list index out of range.''' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , _UpperCAmelCase , _UpperCAmelCase ):
if not 0 <= index < len(self ):
raise ValueError('''list index out of range.''' )
lowercase__: Optional[int] = self.head
for _ in range(_UpperCAmelCase ):
lowercase__: List[Any] = current.next
lowercase__: List[Any] = data
def _snake_case ( self , _UpperCAmelCase ):
self.insert_nth(len(self ) , _UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase ):
self.insert_nth(0 , _UpperCAmelCase )
def _snake_case ( self , _UpperCAmelCase , _UpperCAmelCase ):
if not 0 <= index <= len(self ):
raise IndexError('''list index out of range''' )
lowercase__: List[Any] = Node(_UpperCAmelCase )
if self.head is None:
lowercase__: Dict = new_node
elif index == 0:
lowercase__: Any = self.head # link new_node to head
lowercase__: Tuple = new_node
else:
lowercase__: Union[str, Any] = self.head
for _ in range(index - 1 ):
lowercase__: int = temp.next
lowercase__: int = temp.next
lowercase__: Any = new_node
def _snake_case ( self ): # print every node data
print(self )
def _snake_case ( self ):
return self.delete_nth(0 )
def _snake_case ( self ): # delete from tail
return self.delete_nth(len(self ) - 1 )
def _snake_case ( self , _UpperCAmelCase = 0 ):
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('''List index out of range.''' )
lowercase__: Any = self.head # default first node
if index == 0:
lowercase__: Tuple = self.head.next
else:
lowercase__: Union[str, Any] = self.head
for _ in range(index - 1 ):
lowercase__: List[str] = temp.next
lowercase__: int = temp.next
lowercase__: List[str] = temp.next.next
return delete_node.data
def _snake_case ( self ):
return self.head is None
def _snake_case ( self ):
lowercase__: Optional[int] = None
lowercase__: Union[str, Any] = self.head
while current:
# Store the current node's next node.
lowercase__: Union[str, Any] = current.next
# Make the current node's next point backwards
lowercase__: Union[str, Any] = prev
# Make the previous node be the current node
lowercase__: Union[str, Any] = current
# Make the current node the next node (to progress iteration)
lowercase__: Any = next_node
# Return prev in order to put the head at the end
lowercase__: int = prev
def SCREAMING_SNAKE_CASE__ ( ) -> None:
lowercase__: Tuple = LinkedList()
assert linked_list.is_empty() is True
assert str(__UpperCAmelCase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(1_0 ):
assert len(__UpperCAmelCase ) == i
linked_list.insert_nth(__UpperCAmelCase , i + 1 )
assert str(__UpperCAmelCase ) == "->".join(str(__UpperCAmelCase ) for i in range(1 , 1_1 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(1_1 )
assert str(__UpperCAmelCase ) == "->".join(str(__UpperCAmelCase ) for i in range(0 , 1_2 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 1_0
assert linked_list.delete_tail() == 1_1
assert len(__UpperCAmelCase ) == 9
assert str(__UpperCAmelCase ) == "->".join(str(__UpperCAmelCase ) for i in range(1 , 1_0 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
lowercase__: Optional[Any] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(__UpperCAmelCase ) == "->".join(str(__UpperCAmelCase ) for i in range(-8 , 1 ) )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
lowercase__: List[Any] = [
-9,
1_0_0,
Node(7_7_3_4_5_1_1_2 ),
'''dlrow olleH''',
7,
5_5_5_5,
0,
-1_9_2.5_5_5_5_5,
'''Hello, world!''',
7_7.9,
Node(1_0 ),
None,
None,
1_2.2_0,
]
lowercase__: Union[str, Any] = LinkedList()
for i in test_input:
linked_list.insert_tail(__UpperCAmelCase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(__UpperCAmelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
lowercase__: Tuple = linked_list.delete_head()
assert result == -9
assert (
str(__UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
lowercase__: str = linked_list.delete_tail()
assert result == 1_2.2
assert (
str(__UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
lowercase__: List[str] = linked_list.delete_nth(1_0 )
assert result is None
assert (
str(__UpperCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('''Hello again, world!''' ) )
assert (
str(__UpperCAmelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(__UpperCAmelCase )
assert (
str(__UpperCAmelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(__UpperCAmelCase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
from doctest import testmod
testmod()
lowercase__: Any = LinkedList()
linked_list.insert_head(input('''Inserting 1st at head ''' ).strip() )
linked_list.insert_head(input('''Inserting 2nd at head ''' ).strip() )
print('''\nPrint list:''' )
linked_list.print_list()
linked_list.insert_tail(input('''\nInserting 1st at tail ''' ).strip() )
linked_list.insert_tail(input('''Inserting 2nd at tail ''' ).strip() )
print('''\nPrint list:''' )
linked_list.print_list()
print('''\nDelete head''' )
linked_list.delete_head()
print('''Delete tail''' )
linked_list.delete_tail()
print('''\nPrint list:''' )
linked_list.print_list()
print('''\nReverse linked list''' )
linked_list.reverse()
print('''\nPrint list:''' )
linked_list.print_list()
print('''\nString representation of linked list:''' )
print(__UpperCAmelCase )
print('''\nReading/changing Node data using indexing:''' )
print(F"""Element at Position 1: {linked_list[1]}""" )
lowercase__: Optional[int] = input('''Enter New Value: ''' ).strip()
print('''New list:''' )
print(__UpperCAmelCase )
print(F"""length of linked_list is : {len(__UpperCAmelCase )}""" )
if __name__ == "__main__":
main()
| 586 | """simple docstring"""
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Count the positive n-digit integers that are also an nth power."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
if __name__ == "__main__":
print(f'''{solution(1_0, 2_2) = }''')
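
    # Added examples from the problem statement: 16807 = 7**5 has five digits and
    # 134217728 = 8**9 has nine digits, so both are counted by solution().
    assert len(str(7**5)) == 5
    assert len(str(8**9)) == 9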
| 586 | 1 |
"""simple docstring"""
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class _UpperCAmelCase ( _lowerCAmelCase ):
def __init__( self : Union[str, Any] , _lowercase : Union[str, Any]=0.01 , _lowercase : List[str]=10_00 ):
__UpperCAmelCase = p_stop
__UpperCAmelCase = max_length
def __iter__( self : Dict ):
__UpperCAmelCase = 0
__UpperCAmelCase = False
while not stop and count < self.max_length:
yield count
count += 1
__UpperCAmelCase = random.random() < self.p_stop
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Any , _lowercase : List[Any] , _lowercase : int , _lowercase : List[Any]=False , _lowercase : Optional[int]=True ):
__UpperCAmelCase = [
BatchSamplerShard(_lowercase , 2 , _lowercase , split_batches=_lowercase , even_batches=_lowercase )
for i in range(2 )
]
__UpperCAmelCase = [list(_lowercase ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(_lowercase ) for shard in batch_sampler_shards] , [len(_lowercase ) for e in expected] )
self.assertListEqual(_lowercase , _lowercase )
def a ( self : Any ):
# Check the shards when the dataset is a round multiple of total batch size.
__UpperCAmelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=_lowercase )
__UpperCAmelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_lowercase , _lowercase )
__UpperCAmelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=_lowercase )
# Expected shouldn't change
self.check_batch_sampler_shards(_lowercase , _lowercase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__UpperCAmelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=_lowercase )
__UpperCAmelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(_lowercase , _lowercase )
__UpperCAmelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=_lowercase )
__UpperCAmelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowercase , _lowercase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__UpperCAmelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=_lowercase )
__UpperCAmelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(_lowercase , _lowercase )
__UpperCAmelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=_lowercase )
__UpperCAmelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowercase , _lowercase )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
__UpperCAmelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=_lowercase )
__UpperCAmelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(_lowercase , _lowercase )
__UpperCAmelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=_lowercase )
__UpperCAmelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowercase , _lowercase )
# Check the shards when the dataset is very small.
__UpperCAmelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=_lowercase )
__UpperCAmelCase = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(_lowercase , _lowercase )
__UpperCAmelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=_lowercase )
__UpperCAmelCase = [[], []]
self.check_batch_sampler_shards(_lowercase , _lowercase )
def a ( self : Optional[int] ):
# Check the shards when the dataset is a round multiple of batch size.
__UpperCAmelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=_lowercase )
__UpperCAmelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_lowercase , _lowercase , split_batches=_lowercase )
__UpperCAmelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=_lowercase )
# Expected shouldn't change
self.check_batch_sampler_shards(_lowercase , _lowercase , split_batches=_lowercase )
# Check the shards when the dataset is not a round multiple of batch size.
__UpperCAmelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=_lowercase )
__UpperCAmelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(_lowercase , _lowercase , split_batches=_lowercase )
__UpperCAmelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=_lowercase )
__UpperCAmelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowercase , _lowercase , split_batches=_lowercase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__UpperCAmelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=_lowercase )
__UpperCAmelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(_lowercase , _lowercase , split_batches=_lowercase )
__UpperCAmelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=_lowercase )
__UpperCAmelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowercase , _lowercase , split_batches=_lowercase )
# Check the shards when the dataset is very small.
__UpperCAmelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=_lowercase )
__UpperCAmelCase = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(_lowercase , _lowercase , split_batches=_lowercase )
__UpperCAmelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=_lowercase )
__UpperCAmelCase = [[], []]
self.check_batch_sampler_shards(_lowercase , _lowercase , split_batches=_lowercase )
def a ( self : str ):
# Check the shards when the dataset is a round multiple of total batch size.
__UpperCAmelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=_lowercase )
__UpperCAmelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(_lowercase , _lowercase , even_batches=_lowercase )
__UpperCAmelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=_lowercase )
# Expected shouldn't change
self.check_batch_sampler_shards(_lowercase , _lowercase , even_batches=_lowercase )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__UpperCAmelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=_lowercase )
__UpperCAmelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowercase , _lowercase , even_batches=_lowercase )
__UpperCAmelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=_lowercase )
__UpperCAmelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowercase , _lowercase , even_batches=_lowercase )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__UpperCAmelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=_lowercase )
__UpperCAmelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(_lowercase , _lowercase , even_batches=_lowercase )
__UpperCAmelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=_lowercase )
__UpperCAmelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowercase , _lowercase , even_batches=_lowercase )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
__UpperCAmelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=_lowercase )
__UpperCAmelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowercase , _lowercase , even_batches=_lowercase )
__UpperCAmelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=_lowercase )
__UpperCAmelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(_lowercase , _lowercase , even_batches=_lowercase )
# Check the shards when the dataset is very small.
__UpperCAmelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=_lowercase )
__UpperCAmelCase = [[[0, 1]], []]
self.check_batch_sampler_shards(_lowercase , _lowercase , even_batches=_lowercase )
__UpperCAmelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=_lowercase )
__UpperCAmelCase = [[], []]
self.check_batch_sampler_shards(_lowercase , _lowercase , even_batches=_lowercase )
def a ( self : Dict ):
# Check the shards when the dataset is a round multiple of batch size.
__UpperCAmelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=_lowercase )
__UpperCAmelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(_lowercase , _lowercase , split_batches=_lowercase , even_batches=_lowercase )
__UpperCAmelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=_lowercase )
# Expected shouldn't change
self.check_batch_sampler_shards(_lowercase , _lowercase , split_batches=_lowercase , even_batches=_lowercase )
# Check the shards when the dataset is not a round multiple of batch size.
__UpperCAmelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=_lowercase )
__UpperCAmelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowercase , _lowercase , split_batches=_lowercase , even_batches=_lowercase )
__UpperCAmelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=_lowercase )
__UpperCAmelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowercase , _lowercase , split_batches=_lowercase , even_batches=_lowercase )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__UpperCAmelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=_lowercase )
__UpperCAmelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowercase , _lowercase , split_batches=_lowercase , even_batches=_lowercase )
__UpperCAmelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=_lowercase )
__UpperCAmelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(_lowercase , _lowercase , split_batches=_lowercase , even_batches=_lowercase )
# Check the shards when the dataset is very small.
__UpperCAmelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=_lowercase )
__UpperCAmelCase = [[[0, 1]], []]
self.check_batch_sampler_shards(_lowercase , _lowercase , split_batches=_lowercase , even_batches=_lowercase )
__UpperCAmelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=_lowercase )
__UpperCAmelCase = [[], []]
self.check_batch_sampler_shards(_lowercase , _lowercase , split_batches=_lowercase , even_batches=_lowercase )
def a ( self : Tuple ):
__UpperCAmelCase = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
__UpperCAmelCase = [BatchSamplerShard(_lowercase , 2 , _lowercase , even_batches=_lowercase ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def a ( self : Optional[Any] , _lowercase : Optional[Any] , _lowercase : Optional[int] , _lowercase : Union[str, Any] , _lowercase : List[str]=False , _lowercase : Optional[int]=2 , _lowercase : int=False ):
random.seed(_lowercase )
__UpperCAmelCase = list(_lowercase )
__UpperCAmelCase = [
IterableDatasetShard(
_lowercase , batch_size=_lowercase , drop_last=_lowercase , num_processes=_lowercase , process_index=_lowercase , split_batches=_lowercase , )
for i in range(_lowercase )
]
__UpperCAmelCase = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(_lowercase )
iterable_dataset_lists.append(list(_lowercase ) )
__UpperCAmelCase = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
__UpperCAmelCase = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
self.assertTrue(len(_lowercase ) % shard_batch_size == 0 )
__UpperCAmelCase = []
for idx in range(0 , len(_lowercase ) , _lowercase ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(_lowercase ) < len(_lowercase ):
reference += reference
self.assertListEqual(_lowercase , reference[: len(_lowercase )] )
def a ( self : str ):
__UpperCAmelCase = 42
__UpperCAmelCase = RandomIterableDataset()
self.check_iterable_dataset_shards(_lowercase , _lowercase , batch_size=4 , drop_last=_lowercase , split_batches=_lowercase )
self.check_iterable_dataset_shards(_lowercase , _lowercase , batch_size=4 , drop_last=_lowercase , split_batches=_lowercase )
self.check_iterable_dataset_shards(_lowercase , _lowercase , batch_size=4 , drop_last=_lowercase , split_batches=_lowercase )
self.check_iterable_dataset_shards(_lowercase , _lowercase , batch_size=4 , drop_last=_lowercase , split_batches=_lowercase )
# Edge case with a very small dataset
__UpperCAmelCase = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(_lowercase , _lowercase , batch_size=4 , drop_last=_lowercase , split_batches=_lowercase )
self.check_iterable_dataset_shards(_lowercase , _lowercase , batch_size=4 , drop_last=_lowercase , split_batches=_lowercase )
self.check_iterable_dataset_shards(_lowercase , _lowercase , batch_size=4 , drop_last=_lowercase , split_batches=_lowercase )
self.check_iterable_dataset_shards(_lowercase , _lowercase , batch_size=4 , drop_last=_lowercase , split_batches=_lowercase )
def a ( self : Tuple ):
__UpperCAmelCase = BatchSampler(range(16 ) , batch_size=4 , drop_last=_lowercase )
__UpperCAmelCase = SkipBatchSampler(_lowercase , 2 )
self.assertListEqual(list(_lowercase ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a ( self : Any ):
__UpperCAmelCase = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a ( self : Optional[Any] ):
__UpperCAmelCase = DataLoader(list(range(16 ) ) , batch_size=4 )
__UpperCAmelCase = skip_first_batches(_lowercase , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def a ( self : Any ):
__UpperCAmelCase = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(_lowercase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(_lowercase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def a ( self : int ):
Accelerator()
__UpperCAmelCase = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(_lowercase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(_lowercase ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
| 397 |
"""simple docstring"""
from typing import Any
def viterbi(
    observations_space: list,
    states_space: list,
    initial_probabilities: dict,
    transition_probabilities: dict,
    emission_probabilities: dict,
) -> list:
    """Return the most likely sequence of hidden states for the given observations
    (the Viterbi algorithm)."""
    _validation(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    # Creates data structures and fill initial step
    probabilities: dict = {}
    pointers: dict = {}
    for state in states_space:
        observation = observations_space[0]
        probabilities[(state, observation)] = (
            initial_probabilities[state] * emission_probabilities[state][observation]
        )
        pointers[(state, observation)] = None

    # Fills the data structure with the probabilities of
    # different transitions and pointers to previous states
    for o in range(1, len(observations_space)):
        observation = observations_space[o]
        prior_observation = observations_space[o - 1]
        for state in states_space:
            # Calculates the argmax for probability function
            arg_max = ""
            max_probability = -1
            for k_state in states_space:
                probability = (
                    probabilities[(k_state, prior_observation)]
                    * transition_probabilities[k_state][state]
                    * emission_probabilities[state][observation]
                )
                if probability > max_probability:
                    max_probability = probability
                    arg_max = k_state

            # Update probabilities and pointers dicts
            probabilities[(state, observation)] = (
                probabilities[(arg_max, prior_observation)]
                * transition_probabilities[arg_max][state]
                * emission_probabilities[state][observation]
            )
            pointers[(state, observation)] = arg_max

    # The final observation
    final_observation = observations_space[len(observations_space) - 1]

    # argmax for given final observation
    arg_max = ""
    max_probability = -1
    for k_state in states_space:
        probability = probabilities[(k_state, final_observation)]
        if probability > max_probability:
            max_probability = probability
            arg_max = k_state
    last_state = arg_max

    # Process pointers backwards
    previous = last_state
    result = []
    for o in range(len(observations_space) - 1, -1, -1):
        result.append(previous)
        previous = pointers[previous, observations_space[o]]

    result.reverse()
    return result


def _validation(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_not_empty(
        observations_space,
        states_space,
        initial_probabilities,
        transition_probabilities,
        emission_probabilities,
    )
    _validate_lists(observations_space, states_space)
    _validate_dicts(
        initial_probabilities, transition_probabilities, emission_probabilities
    )


def _validate_not_empty(
    observations_space: Any,
    states_space: Any,
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    if not all(
        [
            observations_space,
            states_space,
            initial_probabilities,
            transition_probabilities,
            emission_probabilities,
        ]
    ):
        raise ValueError("There's an empty parameter")


def _validate_lists(observations_space: Any, states_space: Any) -> None:
    _validate_list(observations_space, "observations_space")
    _validate_list(states_space, "states_space")


def _validate_list(_object: Any, var_name: str) -> None:
    if not isinstance(_object, list):
        msg = f"{var_name} must be a list"
        raise ValueError(msg)
    else:
        for x in _object:
            if not isinstance(x, str):
                msg = f"{var_name} must be a list of strings"
                raise ValueError(msg)


def _validate_dicts(
    initial_probabilities: Any,
    transition_probabilities: Any,
    emission_probabilities: Any,
) -> None:
    _validate_dict(initial_probabilities, "initial_probabilities", float)
    _validate_nested_dict(transition_probabilities, "transition_probabilities")
    _validate_nested_dict(emission_probabilities, "emission_probabilities")


def _validate_nested_dict(_object: Any, var_name: str) -> None:
    _validate_dict(_object, var_name, dict)
    for x in _object.values():
        _validate_dict(x, var_name, float, True)


def _validate_dict(
    _object: Any, var_name: str, value_type: type, nested: bool = False
) -> None:
    if not isinstance(_object, dict):
        msg = f"{var_name} must be a dict"
        raise ValueError(msg)
    if not all(isinstance(x, str) for x in _object):
        msg = f"{var_name} all keys must be strings"
        raise ValueError(msg)
    if not all(isinstance(x, value_type) for x in _object.values()):
        nested_text = "nested dictionary " if nested else ""
        msg = f"{var_name} {nested_text}all values must be {value_type.__name__}"
        raise ValueError(msg)

if __name__ == "__main__":
from doctest import testmod
testmod()
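
    # Added worked example (the classic "Healthy"/"Fever" HMM): the most likely hidden
    # state sequence for the observations below is ['Healthy', 'Healthy', 'Fever'].
    observations = ["normal", "cold", "dizzy"]
    states = ["Healthy", "Fever"]
    start_p = {"Healthy": 0.6, "Fever": 0.4}
    trans_p = {
        "Healthy": {"Healthy": 0.7, "Fever": 0.3},
        "Fever": {"Healthy": 0.4, "Fever": 0.6},
    }
    emit_p = {
        "Healthy": {"normal": 0.5, "cold": 0.4, "dizzy": 0.1},
        "Fever": {"normal": 0.1, "cold": 0.3, "dizzy": 0.6},
    }
    print(viterbi(observations, states, start_p, trans_p, emit_p))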
| 397 | 1 |
'''simple docstring'''
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
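# Quick sanity check (illustrative values): on the sorted list [0, 5, 7, 10, 15],
# bisect_left(..., 6) == 2, bisect_right(..., 7) == 3 and binary_search(..., 15) == 4.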
| 13 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'yjernite/retribert-base-uncased': (
'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'yjernite/retribert-base-uncased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'yjernite/retribert-base-uncased': {'do_lower_case': True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
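# Illustrative usage sketch (assumes the "yjernite/retribert-base-uncased" checkpoint from the map above is reachable):
# tokenizer = RetriBertTokenizerFast.from_pretrained("yjernite/retribert-base-uncased")
# tokenizer("a query", "a passage")  # returns input_ids and attention_mask batches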
| 596 | 0 |
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
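# This script downloads the artifacts of a GitHub Actions workflow run and aggregates the
# failed tests and their error messages into per-error and per-model summary tables.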
def lowercase__ ( __A: str ,__A: Union[str, Any]=None ):
'''simple docstring'''
__magic_name__ : Union[str, Any] = None
if token is not None:
__magic_name__ : Any = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F'''Bearer {token}'''}
__magic_name__ : Optional[Any] = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
__magic_name__ : Dict = requests.get(lowerCAmelCase__ ,headers=lowerCAmelCase__ ).json()
__magic_name__ : Any = {}
try:
job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
__magic_name__ : Union[str, Any] = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 )
for i in range(lowerCAmelCase__ ):
__magic_name__ : Any = requests.get(url + F'''&page={i + 2}''' ,headers=lowerCAmelCase__ ).json()
job_links.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return job_links
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowercase__ ( __A: List[Any] ,__A: int=None ):
'''simple docstring'''
__magic_name__ : str = None
if token is not None:
__magic_name__ : Optional[int] = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F'''Bearer {token}'''}
__magic_name__ : Dict = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100'''
__magic_name__ : Optional[int] = requests.get(lowerCAmelCase__ ,headers=lowerCAmelCase__ ).json()
__magic_name__ : List[Any] = {}
try:
artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
__magic_name__ : List[Any] = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 )
for i in range(lowerCAmelCase__ ):
__magic_name__ : Tuple = requests.get(url + F'''&page={i + 2}''' ,headers=lowerCAmelCase__ ).json()
artifacts.update({artifact['''name''']: artifact['''archive_download_url'''] for artifact in result['''artifacts''']} )
return artifacts
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' )
return {}
def lowercase__ ( __A: List[Any] ,__A: Union[str, Any] ,__A: List[Any] ,__A: Optional[Any] ):
'''simple docstring'''
__magic_name__ : Any = None
if token is not None:
__magic_name__ : List[Any] = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': F'''Bearer {token}'''}
__magic_name__ : Optional[int] = requests.get(lowerCAmelCase__ ,headers=lowerCAmelCase__ ,allow_redirects=lowerCAmelCase__ )
__magic_name__ : int = result.headers['''Location''']
__magic_name__ : int = requests.get(lowerCAmelCase__ ,allow_redirects=lowerCAmelCase__ )
__magic_name__ : List[str] = os.path.join(lowerCAmelCase__ ,F'''{artifact_name}.zip''' )
with open(lowerCAmelCase__ ,'''wb''' ) as fp:
fp.write(response.content )
def lowercase__ ( __A: Dict ,__A: List[str]=None ):
'''simple docstring'''
__magic_name__ : str = []
__magic_name__ : List[Any] = []
__magic_name__ : Dict = None
with zipfile.ZipFile(lowerCAmelCase__ ) as z:
for filename in z.namelist():
if not os.path.isdir(lowerCAmelCase__ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(lowerCAmelCase__ ) as f:
for line in f:
__magic_name__ : List[Any] = line.decode('''UTF-8''' ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
__magic_name__ : Optional[Any] = line[: line.index(''': ''' )]
__magic_name__ : Optional[Any] = line[line.index(''': ''' ) + len(''': ''' ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith('''FAILED ''' ):
# `test` is the test method that failed
__magic_name__ : Optional[int] = line[len('''FAILED ''' ) :]
failed_tests.append(lowerCAmelCase__ )
elif filename == "job_name.txt":
__magic_name__ : Optional[Any] = line
if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ):
raise ValueError(
F'''`errors` and `failed_tests` should have the same number of elements. Got {len(lowerCAmelCase__ )} for `errors` '''
F'''and {len(lowerCAmelCase__ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
''' problem.''' )
__magic_name__ : Union[str, Any] = None
if job_name and job_links:
__magic_name__ : List[Any] = job_links.get(lowerCAmelCase__ ,lowerCAmelCase__ )
# A list with elements of the form (line of error, error, failed test)
__magic_name__ : Dict = [x + [y] + [job_link] for x, y in zip(lowerCAmelCase__ ,lowerCAmelCase__ )]
return result
def lowercase__ ( __A: Union[str, Any] ,__A: Any=None ):
'''simple docstring'''
__magic_name__ : Optional[Any] = []
__magic_name__ : Tuple = [os.path.join(lowerCAmelCase__ ,lowerCAmelCase__ ) for p in os.listdir(lowerCAmelCase__ ) if p.endswith('''.zip''' )]
for p in paths:
errors.extend(get_errors_from_single_artifact(lowerCAmelCase__ ,job_links=lowerCAmelCase__ ) )
return errors
def lowercase__ ( __A: Tuple ,__A: Union[str, Any]=None ):
'''simple docstring'''
__magic_name__ : str = Counter()
counter.update([x[1] for x in logs] )
__magic_name__ : Optional[int] = counter.most_common()
__magic_name__ : List[str] = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
__magic_name__ : Any = {'''count''': count, '''failed_tests''': [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items() , key=lambda item: item[1]["count"] , reverse=True))
return r
def lowercase__ ( __A: Optional[Any] ):
'''simple docstring'''
__magic_name__ : int = test.split('''::''' )[0]
if test.startswith('''tests/models/''' ):
__magic_name__ : Dict = test.split('''/''' )[2]
else:
__magic_name__ : str = None
return test
def lowercase__ ( __A: Any ,__A: Union[str, Any]=None ):
'''simple docstring'''
__magic_name__ : Optional[int] = [(x[0], x[1], get_model(x[2] )) for x in logs]
__magic_name__ : List[str] = [x for x in logs if x[2] is not None]
__magic_name__ : Any = {x[2] for x in logs}
__magic_name__ : Union[str, Any] = {}
for test in tests:
__magic_name__ : Dict = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
__magic_name__ : Union[str, Any] = counter.most_common()
__magic_name__ : Tuple = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
__magic_name__ : Dict = sum(error_counts.values() )
if n_errors > 0:
__magic_name__ : str = {'''count''': n_errors, '''errors''': error_counts}
    r = dict(sorted(r.items() , key=lambda item: item[1]["count"] , reverse=True))
return r
def lowercase__ ( __A: Optional[Any] ):
'''simple docstring'''
__magic_name__ : Any = '''| no. | error | status |'''
__magic_name__ : List[str] = '''|-:|:-|:-|'''
__magic_name__ : List[Any] = [header, sep]
for error in reduced_by_error:
__magic_name__ : str = reduced_by_error[error]['''count''']
__magic_name__ : Dict = F'''| {count} | {error[:1_0_0]} | |'''
lines.append(lowerCAmelCase__ )
return "\n".join(lowerCAmelCase__ )
def lowercase__ ( __A: Tuple ):
'''simple docstring'''
__magic_name__ : str = '''| model | no. of errors | major error | count |'''
__magic_name__ : List[str] = '''|-:|-:|-:|-:|'''
__magic_name__ : Dict = [header, sep]
for model in reduced_by_model:
__magic_name__ : Union[str, Any] = reduced_by_model[model]['''count''']
__magic_name__ , __magic_name__ : Dict = list(reduced_by_model[model]['''errors'''].items() )[0]
__magic_name__ : Tuple = F'''| {model} | {count} | {error[:6_0]} | {_count} |'''
lines.append(lowerCAmelCase__ )
return "\n".join(lowerCAmelCase__ )
if __name__ == "__main__":
__lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
__lowerCamelCase : int = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
__lowerCamelCase : Any = get_job_links(args.workflow_run_id, token=args.token)
__lowerCamelCase : Optional[Any] = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
__lowerCamelCase : Tuple = k.find(''' / ''')
__lowerCamelCase : Any = k[index + len(''' / ''') :]
__lowerCamelCase : int = v
with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
__lowerCamelCase : int = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
__lowerCamelCase : Any = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
__lowerCamelCase : Optional[Any] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
__lowerCamelCase : Tuple = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
__lowerCamelCase : Any = reduce_by_error(errors)
__lowerCamelCase : int = reduce_by_model(errors)
__lowerCamelCase : Optional[int] = make_github_table(reduced_by_error)
__lowerCamelCase : Tuple = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
| 721 |
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class lowerCamelCase ( datasets.Metric ):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
] , )
    def _get_feature_types(self):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        mse = mean_squared_error(
            predictions, references, sample_weight=sample_weight, multioutput=multioutput, squared=squared)
return {"mse": mse}
| 501 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class _UpperCAmelCase:
lowercase__ = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowercase__ = field(
default=lowerCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowercase__ = field(
default=lowerCamelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowercase__ = field(
default=lowerCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
lowercase__ = field(default=lowerCamelCase , metadata={'help': 'Whether tp freeze the encoder.'} )
lowercase__ = field(default=lowerCamelCase , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class _UpperCAmelCase:
lowercase__ = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
lowercase__ = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
lowercase__ = field(
default=10_24 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowercase__ = field(
default=1_28 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowercase__ = field(
default=1_42 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
lowercase__ = field(
default=1_42 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowercase__ = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
lowercase__ = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
lowercase__ = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
lowercase__ = field(default=lowerCamelCase , metadata={'help': 'Source language id for translation.'} )
lowercase__ = field(default=lowerCamelCase , metadata={'help': 'Target language id for translation.'} )
lowercase__ = field(default=lowerCamelCase , metadata={'help': '# num_beams to use for evaluation.'} )
lowercase__ = field(
default=lowerCamelCase , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def handle_metrics(split, metrics, output_dir):
    """Log and save metrics for a given split ("train", "val" or "test")."""
    logger.info(f'''***** {split} metrics *****''' )
    for key in sorted(metrics.keys() ):
        logger.info(f''' {key} = {metrics[key]}''' )
    save_json(metrics, os.path.join(output_dir, f'''{split}_results.json''' ) )
def main():
"""simple docstring"""
_UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses()
check_output_dir(__snake_case )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
        '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''', training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ), training_args.fp16, )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('''Training/evaluation parameters %s''', __snake_case )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCamelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
_UpperCamelCase = ('''encoder_layerdrop''', '''decoder_layerdrop''', '''dropout''', '''attention_dropout''')
for p in extra_model_params:
if getattr(__snake_case, __snake_case, __snake_case ):
assert hasattr(__snake_case, __snake_case ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
setattr(__snake_case, __snake_case, getattr(__snake_case, __snake_case ) )
_UpperCamelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, )
_UpperCamelCase = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path, from_tf='''.ckpt''' in model_args.model_name_or_path, config=__snake_case, cache_dir=model_args.cache_dir, )
# use task specific params
use_task_specific_params(__snake_case, data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_UpperCamelCase = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(__snake_case, (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(__snake_case, __snake_case ):
_UpperCamelCase = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_UpperCamelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(__snake_case )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_UpperCamelCase = SeqaSeqDataset
# Get datasets
_UpperCamelCase = (
dataset_class(
__snake_case, type_path='''train''', data_dir=data_args.data_dir, n_obs=data_args.n_train, max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or '''''', )
if training_args.do_train
else None
)
_UpperCamelCase = (
dataset_class(
__snake_case, type_path='''val''', data_dir=data_args.data_dir, n_obs=data_args.n_val, max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or '''''', )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_UpperCamelCase = (
dataset_class(
__snake_case, type_path='''test''', data_dir=data_args.data_dir, n_obs=data_args.n_test, max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length, prefix=model.config.prefix or '''''', )
if training_args.do_predict
else None
)
# Initialize our Trainer
_UpperCamelCase = (
build_compute_metrics_fn(data_args.task, __snake_case ) if training_args.predict_with_generate else None
)
_UpperCamelCase = SeqaSeqTrainer(
model=__snake_case, args=__snake_case, data_args=__snake_case, train_dataset=__snake_case, eval_dataset=__snake_case, data_collator=SeqaSeqDataCollator(
__snake_case, __snake_case, model.config.decoder_start_token_id, training_args.tpu_num_cores ), compute_metrics=__snake_case, tokenizer=__snake_case, )
_UpperCamelCase = {}
# Training
if training_args.do_train:
logger.info('''*** Train ***''' )
_UpperCamelCase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_UpperCamelCase = train_result.metrics
_UpperCamelCase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('''train''', __snake_case, training_args.output_dir )
all_metrics.update(__snake_case )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir, '''trainer_state.json''' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
_UpperCamelCase = trainer.evaluate(metric_key_prefix='''val''' )
_UpperCamelCase = data_args.n_val
_UpperCamelCase = round(metrics['''val_loss'''], 4 )
if trainer.is_world_process_zero():
handle_metrics('''val''', __snake_case, training_args.output_dir )
all_metrics.update(__snake_case )
if training_args.do_predict:
logger.info('''*** Predict ***''' )
_UpperCamelCase = trainer.predict(test_dataset=__snake_case, metric_key_prefix='''test''' )
_UpperCamelCase = test_output.metrics
_UpperCamelCase = data_args.n_test
if trainer.is_world_process_zero():
_UpperCamelCase = round(metrics['''test_loss'''], 4 )
handle_metrics('''test''', __snake_case, training_args.output_dir )
all_metrics.update(__snake_case )
if training_args.predict_with_generate:
_UpperCamelCase = tokenizer.batch_decode(
test_output.predictions, skip_special_tokens=__snake_case, clean_up_tokenization_spaces=__snake_case )
_UpperCamelCase = lmap(str.strip, __snake_case )
write_txt_file(__snake_case, os.path.join(training_args.output_dir, '''test_generations.txt''' ) )
if trainer.is_world_process_zero():
save_json(__snake_case, os.path.join(training_args.output_dir, '''all_results.json''' ) )
return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 19 |
"""simple docstring"""
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of n."""
    total = 0
    for i in range(1 , int(sqrt(n) + 1) ):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10_000) -> int:
    """Return the sum of all amicable numbers below limit (Project Euler problem 21)."""
    total = sum(
        i
        for i in range(1 , limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i )
    return total
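# Example: 220 and 284 form the classic amicable pair
# (sum_of_divisors(220) == 284 and sum_of_divisors(284) == 220), so both are counted by solution().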
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 169 | 0 |
"""simple docstring"""
DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}


def digits_fifth_powers_sum(number: int) -> int:
    """Return the sum of the fifth powers of the digits of number."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Return the sum of all numbers that equal the sum of fifth powers of their digits."""
    return sum(
        number
        for number in range(1_000 , 1_000_000)
        if number == digits_fifth_powers_sum(number) )
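# Example: 4150 is counted because 4**5 + 1**5 + 5**5 + 0**5 == 4150.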
if __name__ == "__main__":
print(solution())
| 135 |
"""simple docstring"""
import numpy as np
def runge_kutta(f, y0, x0, h, x_end):
    """Classic fourth-order Runge-Kutta integration of y' = f(x, y) from x0 to x_end with step h."""
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
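# Illustrative use (assuming the (f, y0, x0, h, x_end) argument order above):
# runge_kutta(lambda x, y: y, 1.0, 0.0, 0.01, 5.0)[-1] should be close to e**5 ≈ 148.41.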
if __name__ == "__main__":
import doctest
doctest.testmod()
| 135 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
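# Lazy import structure: the heavy PyTorch / TensorFlow modules registered below are only
# imported when one of their symbols is actually accessed (see the _LazyModule hook at the bottom).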
_import_structure = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 453 |
"""simple docstring"""
from __future__ import annotations
solution = []


def is_safe(board: list[list[int]], row: int, column: int) -> bool:
    for i in range(len(board)):
        if board[row][i] == 1:
            return False
    for i in range(len(board)):
        if board[i][column] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, -1, -1)):
        if board[i][j] == 1:
            return False
    for i, j in zip(range(row, -1, -1), range(column, len(board))):
        if board[i][j] == 1:
            return False
    return True


def solve(board: list[list[int]], row: int) -> bool:
    if row >= len(board):
        solution.append(board)
        printboard(board)
        print()
        return True
    for i in range(len(board)):
        if is_safe(board, row, i):
            board[row][i] = 1
            solve(board, row + 1)
            board[row][i] = 0
    return False


def printboard(board: list[list[int]]) -> None:
    for i in range(len(board)):
        for j in range(len(board)):
            if board[i][j] == 1:
                print("Q", end=" ")
            else:
                print(".", end=" ")
        print()
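# For the default 8x8 board set up below, the solver prints all 92 solutions of the
# eight-queens puzzle and reports the total count collected in `solution`.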
# n=int(input("The no. of queens"))
n = 8
board = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print("""The total no. of solutions are :""", len(solution))
| 453 | 1 |
"""simple docstring"""
def solution(numerator: int = 1, digit: int = 1_000) -> int:
    """Find the denominator d <= digit whose unit fraction 1/d has the longest recurring decimal cycle."""
    the_digit = 1
    longest_list_length = 0
    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number
    return the_digit
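# Worked example: 1/7 = 0.(142857) has a 6-digit recurring cycle, the longest of any 1/d with d < 10,
# so solution(1, 10) is expected to return 7 under this implementation.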
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 |
"""simple docstring"""
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """Return all primes up to and including num using the Sieve of Eratosthenes."""
    if num <= 0:
        raise ValueError('''Input must be a positive integer''' )
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            for i in range(p * p , num + 1 , p ):
                primes[i] = False
        p += 1
    return [prime for prime in range(2 , num + 1 ) if primes[prime]]
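# Example: prime_sieve_eratosthenes(10) returns [2, 3, 5, 7].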
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("Enter a positive integer: ").strip())
print(prime_sieve_eratosthenes(user_num))
| 18 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
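# Mapping from fairseq parameter-name fragments to the corresponding HF UniSpeech submodules;
# "*" is a placeholder for the transformer layer index filled in during conversion.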
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
for attribute in key.split(""".""" ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
lowercase = """lm_head"""
lowercase = getattr(UpperCamelCase__ ,UpperCamelCase__ )
if weight_type is not None:
lowercase = getattr(UpperCamelCase__ ,UpperCamelCase__ ).shape
else:
lowercase = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
lowercase = value
elif weight_type == "weight_g":
lowercase = value
elif weight_type == "weight_v":
lowercase = value
elif weight_type == "bias":
lowercase = value
else:
lowercase = value
logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowercase = []
lowercase = fairseq_model.state_dict()
lowercase = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
lowercase = False
if "conv_layers" in name:
load_conv_layer(
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,hf_model.config.feat_extract_norm == """group""" ,)
lowercase = True
else:
for key, mapped_key in MAPPING.items():
lowercase = """unispeech.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
lowercase = True
if "*" in mapped_key:
lowercase = name.split(UpperCamelCase__ )[0].split(""".""" )[-2]
lowercase = mapped_key.replace("""*""" ,UpperCamelCase__ )
if "weight_g" in name:
lowercase = """weight_g"""
elif "weight_v" in name:
lowercase = """weight_v"""
elif "bias" in name:
lowercase = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
lowercase = """weight"""
else:
lowercase = None
set_recursively(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ )
continue
if not is_used:
unused_weights.append(UpperCamelCase__ )
logger.warning(f"""Unused weights: {unused_weights}""" )
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ):
lowercase = full_name.split("""conv_layers.""" )[-1]
lowercase = name.split(""".""" )
lowercase = int(items[0] )
lowercase = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
lowercase = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
lowercase = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
lowercase = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
lowercase = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCamelCase__ )
@torch.no_grad()
def UpperCamelCase__ ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__=None ,lowerCAmelCase__=None ,lowerCAmelCase__=True ):
if config_path is not None:
lowercase = UniSpeechConfig.from_pretrained(UpperCamelCase__ )
else:
lowercase = UniSpeechConfig()
if is_finetuned:
if dict_path:
lowercase = Dictionary.load_from_json(UpperCamelCase__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowercase = target_dict.pad_index
lowercase = target_dict.bos_index
lowercase = target_dict.eos_index
lowercase = len(target_dict.symbols )
lowercase = os.path.join(UpperCamelCase__ ,"""vocab.json""" )
if not os.path.isdir(UpperCamelCase__ ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(UpperCamelCase__ ) )
return
os.makedirs(UpperCamelCase__ ,exist_ok=UpperCamelCase__ )
lowercase = target_dict.indices
# fairseq has the <pad> and <s> switched
lowercase = 42
lowercase = 43
with open(UpperCamelCase__ ,"""w""" ,encoding="""utf-8""" ) as vocab_handle:
json.dump(UpperCamelCase__ ,UpperCamelCase__ )
lowercase = WavaVecaPhonemeCTCTokenizer(
UpperCamelCase__ ,unk_token=target_dict.unk_word ,pad_token=target_dict.pad_word ,bos_token=target_dict.bos_word ,eos_token=target_dict.eos_word ,word_delimiter_token="""|""" ,do_lower_case=UpperCamelCase__ ,)
lowercase = True if config.feat_extract_norm == """layer""" else False
lowercase = WavaVecaFeatureExtractor(
feature_size=1 ,sampling_rate=16_000 ,padding_value=0 ,do_normalize=UpperCamelCase__ ,return_attention_mask=UpperCamelCase__ ,)
lowercase = WavaVecaProcessor(feature_extractor=UpperCamelCase__ ,tokenizer=UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
lowercase = UniSpeechForCTC(UpperCamelCase__ )
else:
lowercase = UniSpeechForPreTraining(UpperCamelCase__ )
if is_finetuned:
lowercase , lowercase , lowercase = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] ,arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] ), """w2v_path""": checkpoint_path} )
else:
lowercase , lowercase , lowercase = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
lowercase = model[0].eval()
recursively_load_weights(UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ )
hf_unispeech.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE : List[Any] =argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
__SCREAMING_SNAKE_CASE : Any =parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 428 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> PretrainedConfig:
        """Instantiate an EncoderDecoderConfig from an encoder and a decoder model configuration."""
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True
        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        """Serialize this instance to a Python dictionary, including the nested encoder/decoder configs."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
return output | 6 | 0 |
from __future__ import annotations
def max_sum_in_array(array: list[int], k: int) -> int:
    """Return the maximum sum of any k consecutive elements of array (sliding-window technique)."""
    if len(array) < k or k < 0:
        raise ValueError("Invalid Input")
    max_sum = current_sum = sum(array[:k])
    for i in range(len(array) - k):
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum, current_sum)
    return max_sum
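# Example: max_sum_in_array([1, 4, 2, 10, 2, 3, 1, 0, 20], 4) == 24 (the window [3, 1, 0, 20]).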
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
print(f'The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}')
| 437 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
__A = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
__A = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->str:
'''simple docstring'''
lowerCamelCase__: List[str] =get_test_to_tester_mapping(UpperCAmelCase_)
lowerCamelCase__: List[str] =get_test_to_tester_mapping(UpperCAmelCase_)
lowerCamelCase__: List[Any] ={"BertModelTest": "BertModelTester"}
lowerCamelCase__: Optional[int] ={
"BlipModelTest": "BlipModelTester",
"BlipTextImageModelTest": "BlipTextImageModelsModelTester",
"BlipTextModelTest": "BlipTextModelTester",
"BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
"BlipVQAModelTest": "BlipVQAModelTester",
"BlipVisionModelTest": "BlipVisionModelTester",
}
self.assertEqual(get_test_info.to_json(UpperCAmelCase_) , UpperCAmelCase_)
self.assertEqual(get_test_info.to_json(UpperCAmelCase_) , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->str:
'''simple docstring'''
lowerCamelCase__: str =get_model_to_test_mapping(UpperCAmelCase_)
lowerCamelCase__: int =get_model_to_test_mapping(UpperCAmelCase_)
lowerCamelCase__: Tuple ={
"BertForMaskedLM": ["BertModelTest"],
"BertForMultipleChoice": ["BertModelTest"],
"BertForNextSentencePrediction": ["BertModelTest"],
"BertForPreTraining": ["BertModelTest"],
"BertForQuestionAnswering": ["BertModelTest"],
"BertForSequenceClassification": ["BertModelTest"],
"BertForTokenClassification": ["BertModelTest"],
"BertLMHeadModel": ["BertModelTest"],
"BertModel": ["BertModelTest"],
}
lowerCamelCase__: Any ={
"BlipForConditionalGeneration": ["BlipTextImageModelTest"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
"BlipForQuestionAnswering": ["BlipVQAModelTest"],
"BlipModel": ["BlipModelTest"],
"BlipTextModel": ["BlipTextModelTest"],
"BlipVisionModel": ["BlipVisionModelTest"],
}
self.assertEqual(get_test_info.to_json(UpperCAmelCase_) , UpperCAmelCase_)
self.assertEqual(get_test_info.to_json(UpperCAmelCase_) , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->Any:
'''simple docstring'''
lowerCamelCase__: Optional[Any] =get_model_to_tester_mapping(UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =get_model_to_tester_mapping(UpperCAmelCase_)
lowerCamelCase__: Any ={
"BertForMaskedLM": ["BertModelTester"],
"BertForMultipleChoice": ["BertModelTester"],
"BertForNextSentencePrediction": ["BertModelTester"],
"BertForPreTraining": ["BertModelTester"],
"BertForQuestionAnswering": ["BertModelTester"],
"BertForSequenceClassification": ["BertModelTester"],
"BertForTokenClassification": ["BertModelTester"],
"BertLMHeadModel": ["BertModelTester"],
"BertModel": ["BertModelTester"],
}
lowerCamelCase__: Optional[Any] ={
"BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
"BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
"BlipForQuestionAnswering": ["BlipVQAModelTester"],
"BlipModel": ["BlipModelTester"],
"BlipTextModel": ["BlipTextModelTester"],
"BlipVisionModel": ["BlipVisionModelTester"],
}
self.assertEqual(get_test_info.to_json(UpperCAmelCase_) , UpperCAmelCase_)
self.assertEqual(get_test_info.to_json(UpperCAmelCase_) , UpperCAmelCase_)
| 437 | 1 |
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    '''Solve the rat-in-a-maze problem and print the path if one exists.'''
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved


def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    '''Recursive backtracking helper that marks the path through the maze in solutions.'''
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True
    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds
    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1
            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True
            solutions[i][j] = 0
            return False
    return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 |
'''simple docstring'''
from __future__ import annotations
import time
import numpy as np
_UpperCAmelCase : int = [8, 5, 9, 7]
_UpperCAmelCase : List[str] = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
_UpperCAmelCase : Union[str, Any] = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
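# Sample data for the Banker's algorithm:
# - claim_vector: total instances of each resource type available in the system
# - allocated_resources_table: resources currently held by each process P1..P5
# - maximum_claim_table: maximum resources each process may ever request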
class __magic_name__ :
def __init__( self , snake_case_ , snake_case_ , snake_case_ , ):
lowercase =claim_vector
lowercase =allocated_resources_table
lowercase =maximum_claim_table
def _A( self ):
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def _A( self ):
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def _A( self ):
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(snake_case_ ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def _A( self ):
return {self.__need().index(snake_case_ ): i for i in self.__need()}
def _A( self , **snake_case_ ):
lowercase =self.__need()
lowercase =self.__allocated_resources_table
lowercase =self.__available_resources()
lowercase =self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('''_''' * 50 + '''\n''' )
while need_list:
lowercase =False
for each_need in need_list:
lowercase =True
for index, need in enumerate(snake_case_ ):
if need > available_resources[index]:
lowercase =False
break
if execution:
lowercase =True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
lowercase =original_need_index
print(f'Process {process_number + 1} is executing.' )
# remove the process run from stack
need_list.remove(snake_case_ )
# update available/freed resources stack
lowercase =np.array(snake_case_ ) + np.array(
alloc_resources_table[process_number] )
print(
'''Updated available resource stack for processes: '''
+ ''' '''.join([str(snake_case_ ) for x in available_resources] ) )
break
if safe:
print('''The process is in a safe state.\n''' )
else:
print('''System in unsafe state. Aborting...\n''' )
break
def _A( self ):
print(''' ''' * 9 + '''Allocated Resource Table''' )
for item in self.__allocated_resources_table:
print(
f'P{self.__allocated_resources_table.index(snake_case_ ) + 1}'
+ ''' '''.join(f'{it:>8}' for it in item )
+ '''\n''' )
print(''' ''' * 9 + '''System Resource Table''' )
for item in self.__maximum_claim_table:
print(
f'P{self.__maximum_claim_table.index(snake_case_ ) + 1}'
+ ''' '''.join(f'{it:>8}' for it in item )
+ '''\n''' )
print(
'''Current Usage by Active Processes: '''
+ ''' '''.join(str(snake_case_ ) for x in self.__claim_vector ) )
print(
'''Initial Available Resources: '''
+ ''' '''.join(str(snake_case_ ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 72 | 0 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 282 |
"""simple docstring"""
import gc
import threading
import time
import psutil
import torch
class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem (in MiB)
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem (in MiB)
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
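
# Illustrative usage sketch (not part of the original module); `train_step` is a
# hypothetical stand-in for whatever workload is being profiled:
#
#   measures = start_measure()
#   train_step()
#   measures = end_measure(measures)
#   log_measures(measures, "one training step")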
| 282 | 1 |
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Print under an exclusive file lock so output from multiple ranks doesn't interleave."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")
    dist.barrier()

    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
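
# This diagnostic is meant to be launched once per GPU through the distributed
# launcher; an illustrative invocation (GPU count and script name are assumptions,
# adjust them to your setup):
#   python -m torch.distributed.run --nproc_per_node=2 torch-distributed-gpu-test.py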
| 17 |
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __lowercase ( SchedulerCommonTest ):
'''simple docstring'''
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1e-5
def _lowerCamelCase ( self ):
__a : int = self.scheduler_classes[0]
__a : int = self.get_scheduler_config()
__a : Optional[Any] = scheduler_class(**_UpperCAmelCase )
__a : int = len(_UpperCAmelCase )
__a : List[str] = self.dummy_model()
__a : List[Any] = self.dummy_sample_deter
__a : Union[str, Any] = torch.manual_seed(0 )
for t in reversed(range(_UpperCAmelCase ) ):
# 1. predict noise residual
__a : Optional[int] = model(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
__a : Dict = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__a : List[Any] = pred_prev_sample
__a : int = torch.sum(torch.abs(_UpperCAmelCase ) )
__a : Union[str, Any] = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def _lowerCamelCase ( self ):
__a : Dict = self.scheduler_classes[0]
__a : int = self.get_scheduler_config(prediction_type='''v_prediction''' )
__a : int = scheduler_class(**_UpperCAmelCase )
__a : Union[str, Any] = len(_UpperCAmelCase )
__a : List[str] = self.dummy_model()
__a : List[str] = self.dummy_sample_deter
__a : str = torch.manual_seed(0 )
for t in reversed(range(_UpperCAmelCase ) ):
# 1. predict noise residual
__a : Dict = model(_UpperCAmelCase , _UpperCAmelCase )
# 2. predict previous mean of sample x_t-1
__a : Dict = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , generator=_UpperCAmelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__a : Optional[int] = pred_prev_sample
__a : Optional[int] = torch.sum(torch.abs(_UpperCAmelCase ) )
__a : int = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 52 | 0 |
'''simple docstring'''
from collections import defaultdict
class AssignmentUsingBitmask:
    def __init__(self, task_performed, total):
        self.total_tasks = total  # total no of tasks (N)

        # DP table will have a dimension of (2^M)*N
        # initially all values are set to -1
        self.dp = [
            [-1 for i in range(total + 1)] for j in range(2 ** len(task_performed))
        ]

        self.task = defaultdict(list)  # stores the list of persons for each task

        # final_mask is used to check if all persons are included by setting all bits
        # to 1
        self.final_mask = (1 << len(task_performed)) - 1

    def count_ways_until(self, mask, task_no):
        # if mask == self.final_mask all persons are distributed tasks, return 1
        if mask == self.final_mask:
            return 1
        # if not everyone gets the task and no more tasks are available, return 0
        if task_no > self.total_tasks:
            return 0
        # if case already considered
        if self.dp[mask][task_no] != -1:
            return self.dp[mask][task_no]

        # number of ways when we do not assign this task in the arrangement
        total_ways_util = self.count_ways_until(mask, task_no + 1)

        # now assign the tasks one by one to all possible persons and recursively
        # assign for the remaining tasks.
        if task_no in self.task:
            for p in self.task[task_no]:
                # if p is already given a task
                if mask & (1 << p):
                    continue

                # assign this task to p and change the mask value. And recursively
                # assign tasks with the new mask value.
                total_ways_util += self.count_ways_until(mask | (1 << p), task_no + 1)

        # save the value.
        self.dp[mask][task_no] = total_ways_util

        return self.dp[mask][task_no]

    def count_no_of_ways(self, task_performed):
        # Store the list of persons for each task
        for i in range(len(task_performed)):
            for j in task_performed[i]:
                self.task[j].append(i)

        # call the function to fill the DP table, final answer is stored in dp[0][1]
        return self.count_ways_until(0, 1)
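
# Complexity note: the memo table above holds 2**M * (N + 1) states (M = number of
# persons, N = total tasks) and each state tries at most M persons, so the search
# runs in O(2**M * N * M) time and O(2**M * N) space.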
if __name__ == "__main__":
    total_tasks = 5  # total no of tasks (the value of N)

    # the list of tasks that can be done by M persons.
    task_performed = [[1, 3, 4], [1, 2, 5], [3, 4]]
    print(
        AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways(
            task_performed
        )
    )
| 721 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
__snake_case : List[Any] = logging.get_logger(__name__)
class lowerCamelCase ( lowercase_ ):
'''simple docstring'''
__snake_case = 'linear'
__snake_case = 'cosine'
__snake_case = 'cosine_with_restarts'
__snake_case = 'polynomial'
__snake_case = 'constant'
__snake_case = 'constant_with_warmup'
__snake_case = 'piecewise_constant'
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int = -1 ) -> List[str]:
"""simple docstring"""
return LambdaLR(__snake_case, lambda __snake_case : 1, last_epoch=__snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1.0, __snake_case ) )
return 1.0
return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : str, __snake_case : int = -1 ) -> Optional[Any]:
"""simple docstring"""
A__ : str ={}
A__ : Tuple =step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
A__ , A__ : int =rule_str.split(""":""" )
A__ : Optional[int] =int(__snake_case )
A__ : List[Any] =float(__snake_case )
A__ : Union[str, Any] =value
A__ : int =float(rule_list[-1] )
def create_rules_function(__snake_case : int, __snake_case : Dict ):
def rule_func(__snake_case : int ) -> float:
A__ : Any =sorted(rules_dict.keys() )
for i, sorted_step in enumerate(__snake_case ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
A__ : Any =create_rules_function(__snake_case, __snake_case )
return LambdaLR(__snake_case, __snake_case, last_epoch=__snake_case )
def __lowerCamelCase ( __snake_case : List[Any], __snake_case : Dict, __snake_case : List[Any], __snake_case : Any=-1 ) -> int:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
return max(
0.0, float(num_training_steps - current_step ) / float(max(1, num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : float = 0.5, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : Dict ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
A__ : List[str] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) )
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(__snake_case ) * 2.0 * progress )) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : Optimizer, __snake_case : int, __snake_case : int, __snake_case : int = 1, __snake_case : int = -1 ) -> Dict:
"""simple docstring"""
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
A__ : Union[str, Any] =float(current_step - num_warmup_steps ) / float(max(1, num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(__snake_case ) * progress) % 1.0) )) )
return LambdaLR(__snake_case, __snake_case, __snake_case )
def __lowerCamelCase ( __snake_case : int, __snake_case : int, __snake_case : Optional[int], __snake_case : Optional[int]=1E-7, __snake_case : List[Any]=1.0, __snake_case : Any=-1 ) -> List[Any]:
"""simple docstring"""
A__ : Optional[int] =optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})" )
def lr_lambda(__snake_case : int ):
if current_step < num_warmup_steps:
return float(__snake_case ) / float(max(1, __snake_case ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
A__ : List[Any] =lr_init - lr_end
A__ : Any =num_training_steps - num_warmup_steps
A__ : Tuple =1 - (current_step - num_warmup_steps) / decay_steps
A__ : List[str] =lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__snake_case, __snake_case, __snake_case )
__snake_case : int = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __lowerCamelCase ( __snake_case : Union[str, SchedulerType], __snake_case : Optimizer, __snake_case : Optional[str] = None, __snake_case : Optional[int] = None, __snake_case : Optional[int] = None, __snake_case : int = 1, __snake_case : float = 1.0, __snake_case : int = -1, ) -> Tuple:
"""simple docstring"""
A__ : Tuple =SchedulerType(__snake_case )
A__ : List[Any] =TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__snake_case, last_epoch=__snake_case )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__snake_case, step_rules=__snake_case, last_epoch=__snake_case )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument." )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__snake_case, num_warmup_steps=__snake_case, last_epoch=__snake_case )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"{name} requires `num_training_steps`, please provide that argument." )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, num_cycles=__snake_case, last_epoch=__snake_case, )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, power=__snake_case, last_epoch=__snake_case, )
return schedule_func(
__snake_case, num_warmup_steps=__snake_case, num_training_steps=__snake_case, last_epoch=__snake_case )
| 687 | 0 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a :int = logging.get_logger(__name__)
a :Optional[Any] = {
"CarlCochet/trajectory-transformer-halfcheetah-medium-v2": (
"https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json"
),
# See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer
}
class __a (PretrainedConfig):
    '''simple docstring'''

    model_type = "trajectory_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=100, action_weight=5, reward_weight=1, value_weight=1,
        block_size=249, action_dim=6, observation_dim=17, transition_dim=25,
        n_layer=4, n_head=4, n_embd=128,
        embd_pdrop=0.1, attn_pdrop=0.1, resid_pdrop=0.1,
        learning_rate=0.0006, max_position_embeddings=512,
        initializer_range=0.02, layer_norm_eps=1e-12, kaiming_initializer_range=1,
        use_cache=True, pad_token_id=1, bos_token_id=50_256, eos_token_id=50_256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.action_weight = action_weight
        self.reward_weight = reward_weight
        self.value_weight = value_weight
        self.max_position_embeddings = max_position_embeddings
        self.block_size = block_size
        self.action_dim = action_dim
        self.observation_dim = observation_dim
        self.transition_dim = transition_dim
        self.learning_rate = learning_rate
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_embd = n_embd
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.resid_pdrop = resid_pdrop
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.kaiming_initializer_range = kaiming_initializer_range
        self.use_cache = use_cache
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 680 |
"""simple docstring"""
import torch
from diffusers import StableDiffusionPipeline
model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
| 110 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : List[str] =logging.get_logger(__name__)
A__ : List[Any] ={
'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class __A ( PretrainedConfig ):
    model_type = "sew"
def __init__( self : Any , lowerCamelCase : Union[str, Any]=32 , lowerCamelCase : Union[str, Any]=7_68 , lowerCamelCase : List[str]=12 , lowerCamelCase : List[Any]=12 , lowerCamelCase : Union[str, Any]=30_72 , lowerCamelCase : int=2 , lowerCamelCase : Tuple="gelu" , lowerCamelCase : Tuple=0.1 , lowerCamelCase : Union[str, Any]=0.1 , lowerCamelCase : Optional[int]=0.1 , lowerCamelCase : Any=0.0 , lowerCamelCase : Tuple=0.1 , lowerCamelCase : List[str]=0.1 , lowerCamelCase : List[str]=0.02 , lowerCamelCase : Union[str, Any]=1e-5 , lowerCamelCase : Any="group" , lowerCamelCase : Optional[Any]="gelu" , lowerCamelCase : Optional[Any]=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , lowerCamelCase : str=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , lowerCamelCase : int=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , lowerCamelCase : str=False , lowerCamelCase : Dict=1_28 , lowerCamelCase : str=16 , lowerCamelCase : Optional[int]=True , lowerCamelCase : Tuple=0.05 , lowerCamelCase : Optional[Any]=10 , lowerCamelCase : Union[str, Any]=2 , lowerCamelCase : int=0.0 , lowerCamelCase : str=10 , lowerCamelCase : int=0 , lowerCamelCase : List[Any]="mean" , lowerCamelCase : Optional[Any]=False , lowerCamelCase : Any=False , lowerCamelCase : int=2_56 , lowerCamelCase : Optional[int]=0 , lowerCamelCase : List[str]=1 , lowerCamelCase : List[Any]=2 , **lowerCamelCase : int , ):
"""simple docstring"""
super().__init__(**lowerCamelCase , pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase )
__A : Dict = hidden_size
__A : List[Any] = feat_extract_norm
__A : List[str] = feat_extract_activation
__A : Optional[Any] = list(lowerCamelCase )
__A : List[str] = list(lowerCamelCase )
__A : str = list(lowerCamelCase )
__A : Optional[Any] = conv_bias
__A : Union[str, Any] = num_conv_pos_embeddings
__A : Union[str, Any] = num_conv_pos_embedding_groups
__A : Tuple = len(self.conv_dim )
__A : Optional[int] = num_hidden_layers
__A : Dict = intermediate_size
__A : str = squeeze_factor
__A : str = hidden_act
__A : Dict = num_attention_heads
__A : Tuple = hidden_dropout
__A : str = attention_dropout
__A : Tuple = activation_dropout
__A : Any = feat_proj_dropout
__A : Union[str, Any] = final_dropout
__A : Any = layerdrop
__A : Any = layer_norm_eps
__A : Any = initializer_range
__A : Union[str, Any] = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
f"but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"
f"= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__A : Tuple = apply_spec_augment
__A : List[str] = mask_time_prob
__A : List[str] = mask_time_length
__A : Tuple = mask_time_min_masks
__A : Optional[int] = mask_feature_prob
__A : Any = mask_feature_length
__A : Any = mask_feature_min_masks
# ctc loss
__A : str = ctc_loss_reduction
__A : Tuple = ctc_zero_infinity
# sequence classification
__A : List[str] = use_weighted_layer_sum
__A : Union[str, Any] = classifier_proj_size
@property
def lowercase_( self : Optional[Any] ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 499 |
'''simple docstring'''
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    """Run the accelerate `_hf_hook.pre_forward` hook (when present) before calling the wrapped method."""
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
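
# Illustrative usage sketch (an assumption, not part of the original module): the
# decorator is meant for methods of a module that accelerate may have hooked, e.g.
#
#   class MyModel(torch.nn.Module):
#       @apply_forward_hook
#       def encode(self, x):
#           ...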
| 499 | 1 |
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 513 |
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPT2Tokenizer, RobertaTokenizer


logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
logger = logging.getLogger(__name__)
def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPT2Tokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10_000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
main()
| 513 | 1 |
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)


class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """BuilderConfig for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
'.aiff',
'.au',
'.avr',
'.caf',
'.flac',
'.htk',
'.svx',
'.mat4',
'.mat5',
'.mpc2k',
'.ogg',
'.paf',
'.pvf',
'.raw',
'.rf64',
'.sd2',
'.sds',
'.ircam',
'.voc',
'.w64',
'.wav',
'.nist',
'.wavex',
'.wve',
'.xi',
'.mp3',
'.opus',
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
| 585 |
from PIL import Image
def mean_threshold(image: Image) -> Image:
    """Binarize a greyscale PIL image around its mean pixel value."""
    height, width = image.size
    mean = 0
    pixels = image.load()
    for i in range(width):
        for j in range(height):
            pixel = pixels[j, i]
            mean += pixel
    mean //= width * height

    for j in range(width):
        for i in range(height):
            pixels[i, j] = 255 if pixels[i, j] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
| 585 | 1 |
from __future__ import annotations
def merge(input_list: list, low: int, mid: int, high: int) -> list:
    """Merge the sorted halves input_list[low:mid] and input_list[mid:high + 1] in place."""
    result = []
    left, right = input_list[low:mid], input_list[mid : high + 1]
    while left and right:
        result.append((left if left[0] <= right[0] else right).pop(0))
    input_list[low : high + 1] = result + left + right
    return input_list


def iter_merge_sort(input_list: list) -> list:
    """Sort a list with iterative (bottom-up) merge sort."""
    if len(input_list) <= 1:
        return input_list
    input_list = list(input_list)

    # iteration for two-way merging
    p = 2
    while p <= len(input_list):
        # getting low, high and middle value for merge-sort of single list
        for i in range(0, len(input_list), p):
            low = i
            high = i + p - 1
            mid = (low + high + 1) // 2
            input_list = merge(input_list, low, mid, high)
        # final merge of last two parts
        if p * 2 >= len(input_list):
            mid = i
            input_list = merge(input_list, 0, mid, len(input_list) - 1)
            break
        p *= 2

    return input_list


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    if user_input == "":
        unsorted = []
    else:
        unsorted = [int(item.strip()) for item in user_input.split(",")]
    print(iter_merge_sort(unsorted))
| 97 |
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.weight", F"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
F"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
F"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
F"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
F"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"decoder.layers.{i}.final_layer_norm.bias"))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", F"decoder.layers.{i}.sa_qcontent_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", F"decoder.layers.{i}.sa_kcontent_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_qpos_proj.weight", F"decoder.layers.{i}.sa_qpos_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_kpos_proj.weight", F"decoder.layers.{i}.sa_kpos_proj.weight")
)
rename_keys.append((F"transformer.decoder.layers.{i}.sa_v_proj.weight", F"decoder.layers.{i}.sa_v_proj.weight"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", F"decoder.layers.{i}.ca_qcontent_proj.weight")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", F"decoder.layers.{i}.ca_kcontent_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_kpos_proj.weight", F"decoder.layers.{i}.ca_kpos_proj.weight")
)
rename_keys.append((F"transformer.decoder.layers.{i}.ca_v_proj.weight", F"decoder.layers.{i}.ca_v_proj.weight"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", F"decoder.layers.{i}.ca_qpos_sine_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", F"decoder.layers.{i}.sa_qcontent_proj.bias")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", F"decoder.layers.{i}.sa_kcontent_proj.bias")
)
rename_keys.append((F"transformer.decoder.layers.{i}.sa_qpos_proj.bias", F"decoder.layers.{i}.sa_qpos_proj.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.sa_kpos_proj.bias", F"decoder.layers.{i}.sa_kpos_proj.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.sa_v_proj.bias", F"decoder.layers.{i}.sa_v_proj.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", F"decoder.layers.{i}.ca_qcontent_proj.bias")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", F"decoder.layers.{i}.ca_kcontent_proj.bias")
)
rename_keys.append((F"transformer.decoder.layers.{i}.ca_kpos_proj.bias", F"decoder.layers.{i}.ca_kpos_proj.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.ca_v_proj.bias", F"decoder.layers.{i}.ca_v_proj.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", F"decoder.layers.{i}.ca_qpos_sine_proj.bias")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
("transformer.decoder.ref_point_head.layers.0.weight", "decoder.ref_point_head.layers.0.weight"),
("transformer.decoder.ref_point_head.layers.0.bias", "decoder.ref_point_head.layers.0.bias"),
("transformer.decoder.ref_point_head.layers.1.weight", "decoder.ref_point_head.layers.1.weight"),
("transformer.decoder.ref_point_head.layers.1.bias", "decoder.ref_point_head.layers.1.bias"),
("transformer.decoder.query_scale.layers.0.weight", "decoder.query_scale.layers.0.weight"),
("transformer.decoder.query_scale.layers.0.bias", "decoder.query_scale.layers.0.bias"),
("transformer.decoder.query_scale.layers.1.weight", "decoder.query_scale.layers.1.weight"),
("transformer.decoder.query_scale.layers.1.bias", "decoder.query_scale.layers.1.bias"),
("transformer.decoder.layers.0.ca_qpos_proj.weight", "decoder.layers.0.ca_qpos_proj.weight"),
("transformer.decoder.layers.0.ca_qpos_proj.bias", "decoder.layers.0.ca_qpos_proj.bias"),
]
)
def rename_key(state_dict, old, new):
    """Move a tensor from key `old` to key `new` in the state dict."""
    val = state_dict.pop(old)
    state_dict[new] = val
def rename_backbone_keys(state_dict):
    """Map the original backbone key names onto the HF conv encoder naming."""
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict, is_panoptic=False):
    prefix = ""
    if is_panoptic:
        prefix = "conditional_detr."

    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict;
        # the destination key names follow the standard DETR conversion layout
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
def prepare_img():
    """Fetch the standard COCO sample image used to verify the conversion."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_conditional_detr_checkpoint(model_name, pytorch_dump_folder_path):
"""simple docstring"""
lowercase = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
lowercase = '''resnet101'''
if "dc5" in model_name:
lowercase = True
lowercase = '''panoptic''' in model_name
if is_panoptic:
lowercase = 250
else:
lowercase = 91
lowercase = '''huggingface/label-files'''
lowercase = '''coco-detection-id2label.json'''
lowercase = json.load(open(hf_hub_download(UpperCAmelCase, UpperCAmelCase, repo_type='''dataset''' ), '''r''' ) )
lowercase = {int(UpperCAmelCase ): v for k, v in idalabel.items()}
lowercase = idalabel
lowercase = {v: k for k, v in idalabel.items()}
# load image processor
lowercase = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
lowercase = ConditionalDetrImageProcessor(format=UpperCAmelCase )
# prepare image
lowercase = prepare_img()
lowercase = image_processor(images=UpperCAmelCase, return_tensors='''pt''' )
lowercase = encoding['''pixel_values''']
logger.info(f'Converting model {model_name}...' )
# load original model from torch hub
lowercase = torch.hub.load('''DeppMeng/ConditionalDETR''', UpperCAmelCase, pretrained=UpperCAmelCase ).eval()
lowercase = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
lowercase = '''conditional_detr.''' + src
rename_key(UpperCAmelCase, UpperCAmelCase, UpperCAmelCase )
lowercase = rename_backbone_keys(UpperCAmelCase )
# query, key and value matrices need special treatment
read_in_q_k_v(UpperCAmelCase, is_panoptic=UpperCAmelCase )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
lowercase = '''conditional_detr.model.''' if is_panoptic else '''model.'''
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith('''conditional_detr''' )
and not key.startswith('''class_labels_classifier''' )
and not key.startswith('''bbox_predictor''' )
):
lowercase = state_dict.pop(UpperCAmelCase )
lowercase = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
lowercase = state_dict.pop(UpperCAmelCase )
lowercase = val
elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
continue
else:
lowercase = state_dict.pop(UpperCAmelCase )
lowercase = val
else:
if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
lowercase = state_dict.pop(UpperCAmelCase )
lowercase = val
# finally, create HuggingFace model and load state dict
lowercase = ConditionalDetrForSegmentation(UpperCAmelCase ) if is_panoptic else ConditionalDetrForObjectDetection(UpperCAmelCase )
model.load_state_dict(UpperCAmelCase )
model.eval()
model.push_to_hub(repo_id=UpperCAmelCase, organization='''DepuMeng''', commit_message='''Add model''' )
# verify our conversion
lowercase = conditional_detr(UpperCAmelCase )
lowercase = model(UpperCAmelCase )
assert torch.allclose(outputs.logits, original_outputs['''pred_logits'''], atol=1e-4 )
assert torch.allclose(outputs.pred_boxes, original_outputs['''pred_boxes'''], atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks, original_outputs['''pred_masks'''], atol=1e-4 )
# Save model and image processor
logger.info(f'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase )
model.save_pretrained(UpperCAmelCase )
image_processor.save_pretrained(UpperCAmelCase )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="conditional_detr_resnet50",
type=str,
help="Name of the CONDITIONAL_DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
    args = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 604 | 0 |
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a :
def __init__( self : int , lowercase_ : List[str] , lowercase_ : Any=13 , lowercase_ : Optional[Any]=30 , lowercase_ : Union[str, Any]=2 , lowercase_ : List[str]=3 , lowercase_ : Union[str, Any]=True , lowercase_ : Any=True , lowercase_ : str=32 , lowercase_ : Any=5 , lowercase_ : Optional[int]=4 , lowercase_ : List[str]=37 , lowercase_ : List[str]="gelu" , lowercase_ : List[str]=0.1 , lowercase_ : Optional[int]=0.1 , lowercase_ : Tuple=10 , lowercase_ : Optional[int]=0.02 , lowercase_ : Union[str, Any]=None , lowercase_ : str=2 , ):
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = image_size
snake_case_ = patch_size
snake_case_ = num_channels
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = scope
snake_case_ = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case_ = (image_size // patch_size) ** 2
snake_case_ = num_patches + 1
def A_ ( self : str ):
snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = self.get_config()
return config, pixel_values, labels
def A_ ( self : Any ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def A_ ( self : Optional[int] , lowercase_ : int , lowercase_ : str , lowercase_ : List[Any] ):
snake_case_ = ViTModel(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A_ ( self : int , lowercase_ : Any , lowercase_ : Any , lowercase_ : Dict ):
snake_case_ = ViTForMaskedImageModeling(config=UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
snake_case_ = 1
snake_case_ = ViTForMaskedImageModeling(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A_ ( self : str , lowercase_ : Tuple , lowercase_ : List[Any] , lowercase_ : Dict ):
snake_case_ = self.type_sequence_label_size
snake_case_ = ViTForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = model(UpperCAmelCase_ , labels=UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case_ = 1
snake_case_ = ViTForImageClassification(UpperCAmelCase_ )
model.to(UpperCAmelCase_ )
model.eval()
snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case_ = model(UpperCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A_ ( self : List[Any] ):
snake_case_ = self.prepare_config_and_inputs()
(
(
snake_case_
) ,(
snake_case_
) ,(
snake_case_
) ,
) = config_and_inputs
snake_case_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a ( __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase ):
snake_case_ = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
snake_case_ = (
{"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification}
if is_torch_available()
else {}
)
snake_case_ = True
snake_case_ = False
snake_case_ = False
snake_case_ = False
def A_ ( self : Optional[Any] ):
snake_case_ = ViTModelTester(self )
snake_case_ = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=37 )
def A_ ( self : str ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def A_ ( self : Optional[int] ):
pass
def A_ ( self : Union[str, Any] ):
snake_case_ ,snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear ) )
def A_ ( self : Union[str, Any] ):
snake_case_ ,snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case_ = model_class(UpperCAmelCase_ )
snake_case_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case_ = [*signature.parameters.keys()]
snake_case_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_ )
def A_ ( self : Any ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_ )
def A_ ( self : List[str] ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase_ )
def A_ ( self : Optional[int] ):
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_ )
@slow
def A_ ( self : Optional[int] ):
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = ViTModel.from_pretrained(UpperCAmelCase_ )
self.assertIsNotNone(UpperCAmelCase_ )
def __magic_name__ ( ) -> Any:
'''simple docstring'''
snake_case_ = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class a ( unittest.TestCase ):
@cached_property
def A_ ( self : Dict ):
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def A_ ( self : Tuple ):
snake_case_ = ViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ).to(UpperCAmelCase_ )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=UpperCAmelCase_ , return_tensors='''pt''' ).to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
snake_case_ = model(**UpperCAmelCase_ )
# verify the logits
snake_case_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , UpperCAmelCase_ )
snake_case_ = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4 ) )
@slow
def A_ ( self : Dict ):
# ViT models have an `interpolate_pos_encoding` argument in their forward method,
# allowing to interpolate the pre-trained position embeddings in order to use
# the model on higher resolutions. The DINO model by Facebook AI leverages this
# to visualize self-attention on higher resolution images.
snake_case_ = ViTModel.from_pretrained('''facebook/dino-vits8''' ).to(UpperCAmelCase_ )
snake_case_ = ViTImageProcessor.from_pretrained('''facebook/dino-vits8''' , size=480 )
snake_case_ = prepare_img()
snake_case_ = image_processor(images=UpperCAmelCase_ , return_tensors='''pt''' )
snake_case_ = inputs.pixel_values.to(UpperCAmelCase_ )
# forward pass
with torch.no_grad():
snake_case_ = model(UpperCAmelCase_ , interpolate_pos_encoding=UpperCAmelCase_ )
# verify the logits
snake_case_ = torch.Size((1, 3601, 384) )
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase_ )
snake_case_ = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(UpperCAmelCase_ )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def A_ ( self : List[str] ):
snake_case_ = ViTModel.from_pretrained('''facebook/dino-vits8''' , torch_dtype=torch.floataa , device_map='''auto''' )
snake_case_ = self.default_image_processor
snake_case_ = prepare_img()
snake_case_ = image_processor(images=UpperCAmelCase_ , return_tensors='''pt''' )
snake_case_ = inputs.pixel_values.to(UpperCAmelCase_ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
snake_case_ = model(UpperCAmelCase_ )
| 711 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
a : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
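
# ----------------------------------------------------------------------------------------------
# Illustrative sketch (not the actual `_LazyModule` implementation used above): the point of
# the import structure is that importing the package stays cheap, and the heavy torch/TF
# submodules are only imported when one of their exported names is first accessed. A
# standalone helper with the same lookup logic could look like this; it is an assumption for
# illustration and is not wired into the module above.
import importlib
from typing import Dict, List

def _lazy_attribute_lookup(name: str, package: str, import_structure: Dict[str, List[str]]):
    """Resolve `name` by importing only the submodule that exports it (sketch)."""
    for module_name, exported_names in import_structure.items():
        if name in exported_names:
            submodule = importlib.import_module(f".{module_name}", package)
            return getattr(submodule, name)
    raise AttributeError(f"module {package!r} has no attribute {name!r}")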
"""simple docstring"""
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __UpperCamelCase ( __lowerCAmelCase , unittest.TestCase ):
lowerCamelCase : Tuple =MobileBertTokenizer
lowerCamelCase : Optional[int] =MobileBertTokenizerFast
lowerCamelCase : Tuple =True
lowerCamelCase : int =True
lowerCamelCase : Any =filter_non_english
lowerCamelCase : List[str] ="""google/mobilebert-uncased"""
def __a ( self ) -> Union[str, Any]:
super().setUp()
a : Dict = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
a : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
a : List[Any] = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def __a ( self , lowerCAmelCase__ ) -> Optional[int]:
a : List[Any] = """UNwant\u00E9d,running"""
a : str = """unwanted, running"""
return input_text, output_text
def __a ( self ) -> List[Any]:
a : Dict = self.tokenizer_class(self.vocab_file )
a : List[Any] = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(snake_case_ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case_ ) , [9, 6, 7, 12, 10, 11] )
def __a ( self ) -> Tuple:
if not self.test_rust_tokenizer:
return
a : Dict = self.get_tokenizer()
a : Union[str, Any] = self.get_rust_tokenizer()
a : Union[str, Any] = """UNwant\u00E9d,running"""
a : Optional[Any] = tokenizer.tokenize(snake_case_ )
a : Dict = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
a : List[Any] = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
a : Tuple = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
a : str = self.get_rust_tokenizer()
a : str = tokenizer.encode(snake_case_ )
a : Tuple = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
# With lower casing
a : int = self.get_tokenizer(do_lower_case=snake_case_ )
a : int = self.get_rust_tokenizer(do_lower_case=snake_case_ )
a : List[Any] = """UNwant\u00E9d,running"""
a : Any = tokenizer.tokenize(snake_case_ )
a : List[Any] = rust_tokenizer.tokenize(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
a : Union[str, Any] = tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
a : Tuple = rust_tokenizer.encode(snake_case_ , add_special_tokens=snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
a : int = self.get_rust_tokenizer()
a : Optional[int] = tokenizer.encode(snake_case_ )
a : List[str] = rust_tokenizer.encode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def __a ( self ) -> Optional[Any]:
a : Dict = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz" ) , ["ah", "\u535A", "\u63A8", "zz"] )
def __a ( self ) -> List[Any]:
a : Optional[int] = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["hello", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> List[Any]:
a : Dict = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hällo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["h\u00E9llo"] )
def __a ( self ) -> Union[str, Any]:
a : int = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> str:
a : Any = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["hallo", "!", "how", "are", "you", "?"] )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["hello"] )
def __a ( self ) -> Union[str, Any]:
a : int = BasicTokenizer(do_lower_case=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? " ) , ["HeLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> Union[str, Any]:
a : Any = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HäLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> Dict:
a : Optional[Any] = BasicTokenizer(do_lower_case=snake_case_ , strip_accents=snake_case_ )
self.assertListEqual(
tokenizer.tokenize(" \tHäLLo!how \n Are yoU? " ) , ["HaLLo", "!", "how", "Are", "yoU", "?"] )
def __a ( self ) -> str:
a : Dict = BasicTokenizer(do_lower_case=snake_case_ , never_split=["[UNK]"] )
self.assertListEqual(
tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]" ) , ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] )
def __a ( self ) -> str:
a : List[Any] = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
a : Any = {}
for i, token in enumerate(snake_case_ ):
a : Tuple = i
a : List[Any] = WordpieceTokenizer(vocab=snake_case_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("unwanted running" ) , ["un", "##want", "##ed", "runn", "##ing"] )
self.assertListEqual(tokenizer.tokenize("unwantedX running" ) , ["[UNK]", "runn", "##ing"] )
def __a ( self ) -> List[str]:
self.assertTrue(_is_whitespace(" " ) )
self.assertTrue(_is_whitespace("\t" ) )
self.assertTrue(_is_whitespace("\r" ) )
self.assertTrue(_is_whitespace("\n" ) )
self.assertTrue(_is_whitespace("\u00A0" ) )
self.assertFalse(_is_whitespace("A" ) )
self.assertFalse(_is_whitespace("-" ) )
def __a ( self ) -> Union[str, Any]:
self.assertTrue(_is_control("\u0005" ) )
self.assertFalse(_is_control("A" ) )
self.assertFalse(_is_control(" " ) )
self.assertFalse(_is_control("\t" ) )
self.assertFalse(_is_control("\r" ) )
def __a ( self ) -> Optional[Any]:
self.assertTrue(_is_punctuation("-" ) )
self.assertTrue(_is_punctuation("$" ) )
self.assertTrue(_is_punctuation("`" ) )
self.assertTrue(_is_punctuation("." ) )
self.assertFalse(_is_punctuation("A" ) )
self.assertFalse(_is_punctuation(" " ) )
def __a ( self ) -> Any:
a : Dict = self.get_tokenizer()
a : Optional[int] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
self.assertListEqual(
            [rust_tokenizer.tokenize(t ) for t in ["Test", "\xad", "test"]] , [["[UNK]"], [], ["[UNK]"]] )
@slow
def __a ( self ) -> Optional[Any]:
a : str = self.tokenizer_class.from_pretrained("google/mobilebert-uncased" )
a : List[Any] = tokenizer.encode("sequence builders" , add_special_tokens=snake_case_ )
a : Optional[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case_ )
a : Any = tokenizer.build_inputs_with_special_tokens(snake_case_ )
a : Tuple = tokenizer.build_inputs_with_special_tokens(snake_case_ , snake_case_ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def __a ( self ) -> str:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a : Tuple = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
a : Union[str, Any] = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
a : Dict = tokenizer_r.encode_plus(
snake_case_ , return_attention_mask=snake_case_ , return_token_type_ids=snake_case_ , return_offsets_mapping=snake_case_ , add_special_tokens=snake_case_ , )
a : str = tokenizer_r.do_lower_case if hasattr(snake_case_ , "do_lower_case" ) else False
a : str = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["input_ids"] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["offset_mapping"] )
def __a ( self ) -> List[str]:
a : Optional[int] = ["""的""", """人""", """有"""]
a : Union[str, Any] = """""".join(snake_case_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
a : Dict = True
a : Any = self.tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
a : Optional[Any] = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
a : List[str] = tokenizer_p.encode(snake_case_ , add_special_tokens=snake_case_ )
a : str = tokenizer_r.encode(snake_case_ , add_special_tokens=snake_case_ )
a : Optional[Any] = tokenizer_r.convert_ids_to_tokens(snake_case_ )
a : Any = tokenizer_p.convert_ids_to_tokens(snake_case_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(snake_case_ , snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
a : Optional[int] = False
a : Any = self.rust_tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
a : List[Any] = self.tokenizer_class.from_pretrained(snake_case_ , **snake_case_ )
a : Tuple = tokenizer_r.encode(snake_case_ , add_special_tokens=snake_case_ )
a : List[str] = tokenizer_p.encode(snake_case_ , add_special_tokens=snake_case_ )
a : List[str] = tokenizer_r.convert_ids_to_tokens(snake_case_ )
a : Optional[Any] = tokenizer_p.convert_ids_to_tokens(snake_case_ )
# it is expected that only the first Chinese character is not preceded by "##".
a : Tuple = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(snake_case_ )
]
self.assertListEqual(snake_case_ , snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
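
# ----------------------------------------------------------------------------------------------
# Self-contained sketch (not the transformers WordpieceTokenizer, just an illustration of the
# greedy longest-match-first behaviour the WordpieceTokenizer test above relies on): at each
# position the longest matching vocabulary entry wins, continuation pieces are prefixed with
# "##", and a word that cannot be fully covered falls back to the unknown token.
def _wordpiece_sketch(word, vocab, unk_token="[UNK]"):
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        current_piece = None
        while start < end:
            candidate = word[start:end]
            if start > 0:
                candidate = "##" + candidate
            if candidate in vocab:
                current_piece = candidate
                break
            end -= 1
        if current_piece is None:
            return [unk_token]  # no piece matched, the whole word becomes [UNK]
        pieces.append(current_piece)
        start = end
    return pieces

# With the toy vocabulary from setUp() above:
#   _wordpiece_sketch("unwanted", {"un", "##want", "##ed"})  -> ["un", "##want", "##ed"]
#   _wordpiece_sketch("unwantedX", {"un", "##want", "##ed"}) -> ["[UNK]"]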
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
SCREAMING_SNAKE_CASE_ = ['text', 'image', 'audio']
def UpperCamelCase__ ( _lowercase : List[str] ) -> Tuple:
__UpperCAmelCase: Union[str, Any] = []
for input_type in input_types:
if input_type == "text":
inputs.append("""Text input""" )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir("""fixtures/tests_samples/COCO""" ) ) / """000000039769.png""" ).resize((5_1_2, 5_1_2) ) )
elif input_type == "audio":
inputs.append(torch.ones(3_0_0_0 ) )
elif isinstance(_lowercase , _lowercase ):
inputs.append(create_inputs(_lowercase ) )
else:
raise ValueError(F'''Invalid type requested: {input_type}''' )
return inputs
def UpperCamelCase__ ( _lowercase : List ) -> List[str]:
__UpperCAmelCase: str = []
for output in outputs:
if isinstance(_lowercase , (str, AgentText) ):
output_types.append("""text""" )
elif isinstance(_lowercase , (Image.Image, AgentImage) ):
output_types.append("""image""" )
elif isinstance(_lowercase , (torch.Tensor, AgentAudio) ):
output_types.append("""audio""" )
else:
raise ValueError(F'''Invalid output: {output}''' )
return output_types
@is_tool_test
class a :
"""simple docstring"""
def lowercase_ ( self ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """inputs""" ) )
self.assertTrue(hasattr(self.tool , """outputs""" ) )
__UpperCAmelCase: Optional[Any] = self.tool.inputs
for _input in inputs:
if isinstance(_input , snake_case_ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
__UpperCAmelCase: Dict = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: str = create_inputs(self.tool.inputs )
__UpperCAmelCase: Any = self.tool(*snake_case_ )
# There is a single output
if len(self.tool.outputs ) == 1:
__UpperCAmelCase: Union[str, Any] = [outputs]
self.assertListEqual(output_types(snake_case_ ) , self.tool.outputs )
def lowercase_ ( self ):
'''simple docstring'''
self.assertTrue(hasattr(self.tool , """description""" ) )
self.assertTrue(hasattr(self.tool , """default_checkpoint""" ) )
self.assertTrue(self.tool.description.startswith("""This is a tool that""" ) )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Optional[int] = create_inputs(self.tool.inputs )
__UpperCAmelCase: Optional[int] = self.tool(*snake_case_ )
if not isinstance(snake_case_ , snake_case_ ):
__UpperCAmelCase: Tuple = [outputs]
self.assertEqual(len(snake_case_ ) , len(self.tool.outputs ) )
for output, output_type in zip(snake_case_ , self.tool.outputs ):
__UpperCAmelCase: List[Any] = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(snake_case_ , snake_case_ ) )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Optional[int] = create_inputs(self.tool.inputs )
__UpperCAmelCase: Optional[int] = []
for _input, input_type in zip(snake_case_ , self.tool.inputs ):
if isinstance(snake_case_ , snake_case_ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
__UpperCAmelCase: int = self.tool(*snake_case_ )
if not isinstance(snake_case_ , snake_case_ ):
__UpperCAmelCase: Union[str, Any] = [outputs]
        self.assertEqual(len(snake_case_ ) , len(self.tool.outputs ) )
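
# ----------------------------------------------------------------------------------------------
# Illustrative sketch of the attribute surface the mixin above inspects (a hypothetical tool,
# not one shipped with transformers): declared input/output types must come from the
# authorized list, the description must start with "This is a tool that", and __call__ must
# return as many values as there are declared outputs. Real tools typically build on the
# library's Tool/PipelineTool base classes, which is also where outputs get wrapped into the
# agent types checked by the last two tests above.
class _UpperCaseTextTool:
    inputs = ["text"]
    outputs = ["text"]
    description = "This is a tool that upper-cases a piece of text."
    default_checkpoint = "hypothetical/upper-case-checkpoint"

    def __call__(self, text):
        return text.upper()

# A concrete test case would mix the tester class defined above into unittest.TestCase and
# set `self.tool = _UpperCaseTextTool()` (or a real tool) in setUp().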
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def _UpperCamelCase ( UpperCamelCase_ : Tuple , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : List[Any] ) -> Tuple:
"""simple docstring"""
if isinstance(UpperCamelCase_ , torch.Tensor ):
return image
elif isinstance(UpperCamelCase_ , PIL.Image.Image ):
lowerCAmelCase__ = [image]
if isinstance(image[0] , PIL.Image.Image ):
lowerCAmelCase__ = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] ) )[None, :] for i in image]
lowerCAmelCase__ = np.concatenate(UpperCamelCase_ , axis=0 )
lowerCAmelCase__ = np.array(UpperCamelCase_ ).astype(np.floataa ) / 255.0
lowerCAmelCase__ = image.transpose(0 , 3 , 1 , 2 )
lowerCAmelCase__ = 2.0 * image - 1.0
lowerCAmelCase__ = torch.from_numpy(UpperCamelCase_ )
elif isinstance(image[0] , torch.Tensor ):
lowerCAmelCase__ = torch.cat(UpperCamelCase_ , dim=0 )
return image
def _UpperCamelCase ( UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Optional[int] , UpperCamelCase_ : Dict=0.9995 ) -> Tuple:
"""simple docstring"""
if not isinstance(UpperCamelCase_ , np.ndarray ):
lowerCAmelCase__ = True
lowerCAmelCase__ = va.device
lowerCAmelCase__ = va.cpu().numpy()
lowerCAmelCase__ = va.cpu().numpy()
lowerCAmelCase__ = np.sum(va * va / (np.linalg.norm(UpperCamelCase_ ) * np.linalg.norm(UpperCamelCase_ )) )
if np.abs(UpperCamelCase_ ) > DOT_THRESHOLD:
lowerCAmelCase__ = (1 - t) * va + t * va
else:
lowerCAmelCase__ = np.arccos(UpperCamelCase_ )
lowerCAmelCase__ = np.sin(UpperCamelCase_ )
lowerCAmelCase__ = theta_a * t
lowerCAmelCase__ = np.sin(UpperCamelCase_ )
lowerCAmelCase__ = np.sin(theta_a - theta_t ) / sin_theta_a
lowerCAmelCase__ = sin_theta_t / sin_theta_a
lowerCAmelCase__ = sa * va + sa * va
if inputs_are_torch:
lowerCAmelCase__ = torch.from_numpy(UpperCamelCase_ ).to(UpperCamelCase_ )
return va
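
# ----------------------------------------------------------------------------------------------
# Worked example for the spherical interpolation above (illustration only, not used by the
# pipeline): with t=0.5 and two orthogonal unit vectors, the interpolation stays on the unit
# sphere and lands on the 45-degree direction, unlike a plain linear interpolation whose
# midpoint would have norm sqrt(2)/2 before re-normalisation. The values below follow directly
# from the sin-based formula and are stated as a sanity check, not taken from this file.
def _slerp_sanity_check():
    va_demo = np.array([1.0, 0.0])
    vb_demo = np.array([0.0, 1.0])
    theta = np.pi / 2  # angle between the two vectors
    halfway = (np.sin(theta / 2) / np.sin(theta)) * (va_demo + vb_demo)
    assert np.allclose(halfway, [np.sqrt(2) / 2, np.sqrt(2) / 2])
    assert np.isclose(np.linalg.norm(halfway), 1.0)
    return halfway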
def _UpperCamelCase ( UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Union[str, Any] ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ = F.normalize(UpperCamelCase_ , dim=-1 )
lowerCAmelCase__ = F.normalize(UpperCamelCase_ , dim=-1 )
return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def _UpperCamelCase ( UpperCamelCase_ : List[Any] , UpperCamelCase_ : Union[str, Any] ) -> int:
"""simple docstring"""
for param in model.parameters():
lowerCAmelCase__ = value
class __SCREAMING_SNAKE_CASE ( __lowercase):
def __init__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , ):
"""simple docstring"""
super().__init__()
self.register_modules(
vae=_UpperCamelCase , text_encoder=_UpperCamelCase , clip_model=_UpperCamelCase , tokenizer=_UpperCamelCase , unet=_UpperCamelCase , scheduler=_UpperCamelCase , feature_extractor=_UpperCamelCase , coca_model=_UpperCamelCase , coca_tokenizer=_UpperCamelCase , coca_transform=_UpperCamelCase , )
lowerCAmelCase__ = (
feature_extractor.size
if isinstance(feature_extractor.size , _UpperCamelCase )
else feature_extractor.size['shortest_edge']
)
lowerCAmelCase__ = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
set_requires_grad(self.text_encoder , _UpperCamelCase )
set_requires_grad(self.clip_model , _UpperCamelCase )
def UpperCamelCase__ ( self , _UpperCamelCase = "auto" ):
"""simple docstring"""
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCAmelCase__ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_UpperCamelCase )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.enable_attention_slicing(_UpperCamelCase )
def UpperCamelCase__ ( self ):
"""simple docstring"""
set_requires_grad(self.vae , _UpperCamelCase )
def UpperCamelCase__ ( self ):
"""simple docstring"""
set_requires_grad(self.vae , _UpperCamelCase )
def UpperCamelCase__ ( self ):
"""simple docstring"""
set_requires_grad(self.unet , _UpperCamelCase )
def UpperCamelCase__ ( self ):
"""simple docstring"""
set_requires_grad(self.unet , _UpperCamelCase )
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
# get the original timestep using init_timestep
lowerCAmelCase__ = min(int(num_inference_steps * strength ) , _UpperCamelCase )
lowerCAmelCase__ = max(num_inference_steps - init_timestep , 0 )
lowerCAmelCase__ = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ):
"""simple docstring"""
if not isinstance(_UpperCamelCase , torch.Tensor ):
raise ValueError(F"`image` has to be of type `torch.Tensor` but is {type(_UpperCamelCase )}" )
lowerCAmelCase__ = image.to(device=_UpperCamelCase , dtype=_UpperCamelCase )
if isinstance(_UpperCamelCase , _UpperCamelCase ):
lowerCAmelCase__ = [
self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(_UpperCamelCase )
]
lowerCAmelCase__ = torch.cat(_UpperCamelCase , dim=0 )
else:
lowerCAmelCase__ = self.vae.encode(_UpperCamelCase ).latent_dist.sample(_UpperCamelCase )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
lowerCAmelCase__ = 0.1_82_15 * init_latents
lowerCAmelCase__ = init_latents.repeat_interleave(_UpperCamelCase , dim=0 )
lowerCAmelCase__ = randn_tensor(init_latents.shape , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
# get latents
lowerCAmelCase__ = self.scheduler.add_noise(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowerCAmelCase__ = init_latents
return latents
def UpperCamelCase__ ( self , _UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ = self.coca_transform(_UpperCamelCase ).unsqueeze(0 )
with torch.no_grad(), torch.cuda.amp.autocast():
lowerCAmelCase__ = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
lowerCAmelCase__ = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
return generated.split('<end_of_text>' )[0].replace('<start_of_text>' , '' ).rstrip(' .,' )
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ = self.feature_extractor.preprocess(_UpperCamelCase )
lowerCAmelCase__ = torch.from_numpy(clip_image_input['pixel_values'][0] ).unsqueeze(0 ).to(self.device ).half()
lowerCAmelCase__ = self.clip_model.get_image_features(_UpperCamelCase )
lowerCAmelCase__ = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCamelCase )
lowerCAmelCase__ = image_embeddings_clip.repeat_interleave(_UpperCamelCase , dim=0 )
return image_embeddings_clip
@torch.enable_grad()
def UpperCamelCase__ ( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
"""simple docstring"""
lowerCAmelCase__ = latents.detach().requires_grad_()
lowerCAmelCase__ = self.scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
# predict the noise residual
lowerCAmelCase__ = self.unet(_UpperCamelCase , _UpperCamelCase , encoder_hidden_states=_UpperCamelCase ).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
lowerCAmelCase__ = self.scheduler.alphas_cumprod[timestep]
lowerCAmelCase__ = 1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCAmelCase__ = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
lowerCAmelCase__ = torch.sqrt(_UpperCamelCase )
lowerCAmelCase__ = pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , _UpperCamelCase ):
lowerCAmelCase__ = self.scheduler.sigmas[index]
lowerCAmelCase__ = latents - sigma * noise_pred
else:
raise ValueError(F"scheduler type {type(self.scheduler )} not supported" )
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
lowerCAmelCase__ = 1 / 0.1_82_15 * sample
lowerCAmelCase__ = self.vae.decode(_UpperCamelCase ).sample
lowerCAmelCase__ = (image / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase__ = transforms.Resize(self.feature_extractor_size )(_UpperCamelCase )
lowerCAmelCase__ = self.normalize(_UpperCamelCase ).to(latents.dtype )
lowerCAmelCase__ = self.clip_model.get_image_features(_UpperCamelCase )
lowerCAmelCase__ = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=_UpperCamelCase )
lowerCAmelCase__ = spherical_dist_loss(_UpperCamelCase , _UpperCamelCase ).mean() * clip_guidance_scale
lowerCAmelCase__ = -torch.autograd.grad(_UpperCamelCase , _UpperCamelCase )[0]
if isinstance(self.scheduler , _UpperCamelCase ):
lowerCAmelCase__ = latents.detach() + grads * (sigma**2)
lowerCAmelCase__ = noise_pred_original
else:
lowerCAmelCase__ = noise_pred_original - torch.sqrt(_UpperCamelCase ) * grads
return noise_pred, latents
@torch.no_grad()
def __call__( self , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 5_12 , _UpperCamelCase = 5_12 , _UpperCamelCase = 0.6 , _UpperCamelCase = 50 , _UpperCamelCase = 7.5 , _UpperCamelCase = 1 , _UpperCamelCase = 0.0 , _UpperCamelCase = 1_00 , _UpperCamelCase = None , _UpperCamelCase = "pil" , _UpperCamelCase = True , _UpperCamelCase = 0.8 , _UpperCamelCase = 0.1 , _UpperCamelCase = 0.1 , ):
"""simple docstring"""
if isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) != batch_size:
raise ValueError(F"You have passed {batch_size} batch_size, but only {len(_UpperCamelCase )} generators." )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if isinstance(_UpperCamelCase , torch.Generator ) and batch_size > 1:
lowerCAmelCase__ = [generator] + [None] * (batch_size - 1)
lowerCAmelCase__ = [
('model', self.coca_model is None),
('tokenizer', self.coca_tokenizer is None),
('transform', self.coca_transform is None),
]
lowerCAmelCase__ = [x[0] for x in coca_is_none if x[1]]
lowerCAmelCase__ = ', '.join(_UpperCamelCase )
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(_UpperCamelCase ):
raise ValueError(
F"Content prompt is None and CoCa [{coca_is_none_str}] is None."
F"Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." )
lowerCAmelCase__ = self.get_image_description(_UpperCamelCase )
if style_prompt is None:
if len(_UpperCamelCase ):
raise ValueError(
F"Style prompt is None and CoCa [{coca_is_none_str}] is None."
F" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline." )
lowerCAmelCase__ = self.get_image_description(_UpperCamelCase )
# get prompt text embeddings for content and style
lowerCAmelCase__ = self.tokenizer(
_UpperCamelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCamelCase , return_tensors='pt' , )
lowerCAmelCase__ = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
lowerCAmelCase__ = self.tokenizer(
_UpperCamelCase , padding='max_length' , max_length=self.tokenizer.model_max_length , truncation=_UpperCamelCase , return_tensors='pt' , )
lowerCAmelCase__ = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
lowerCAmelCase__ = slerp(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# duplicate text embeddings for each generation per prompt
lowerCAmelCase__ = text_embeddings.repeat_interleave(_UpperCamelCase , dim=0 )
# set timesteps
lowerCAmelCase__ = 'offset' in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
lowerCAmelCase__ = {}
if accepts_offset:
lowerCAmelCase__ = 1
self.scheduler.set_timesteps(_UpperCamelCase , **_UpperCamelCase )
# Some schedulers like PNDM have timesteps as arrays
        # It's more efficient to move all timesteps to the correct device beforehand
self.scheduler.timesteps.to(self.device )
lowerCAmelCase__ , lowerCAmelCase__ = self.get_timesteps(_UpperCamelCase , _UpperCamelCase , self.device )
lowerCAmelCase__ = timesteps[:1].repeat(_UpperCamelCase )
# Preprocess image
lowerCAmelCase__ = preprocess(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowerCAmelCase__ = self.prepare_latents(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , text_embeddings.dtype , self.device , _UpperCamelCase )
lowerCAmelCase__ = preprocess(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
lowerCAmelCase__ = self.prepare_latents(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , text_embeddings.dtype , self.device , _UpperCamelCase )
lowerCAmelCase__ = slerp(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
if clip_guidance_scale > 0:
lowerCAmelCase__ = self.get_clip_image_embeddings(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase__ = self.get_clip_image_embeddings(_UpperCamelCase , _UpperCamelCase )
lowerCAmelCase__ = slerp(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier-free guidance.
lowerCAmelCase__ = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCAmelCase__ = content_text_input.input_ids.shape[-1]
lowerCAmelCase__ = self.tokenizer([''] , padding='max_length' , max_length=_UpperCamelCase , return_tensors='pt' )
lowerCAmelCase__ = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt
lowerCAmelCase__ = uncond_embeddings.repeat_interleave(_UpperCamelCase , dim=0 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCAmelCase__ = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCAmelCase__ = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
lowerCAmelCase__ = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
lowerCAmelCase__ = torch.randn(_UpperCamelCase , generator=_UpperCamelCase , device='cpu' , dtype=_UpperCamelCase ).to(
self.device )
else:
lowerCAmelCase__ = torch.randn(_UpperCamelCase , generator=_UpperCamelCase , device=self.device , dtype=_UpperCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
lowerCAmelCase__ = latents.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
lowerCAmelCase__ = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCAmelCase__ = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCAmelCase__ = {}
if accepts_eta:
lowerCAmelCase__ = eta
# check if the scheduler accepts generator
lowerCAmelCase__ = 'generator' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
if accepts_generator:
lowerCAmelCase__ = generator
with self.progress_bar(total=_UpperCamelCase ):
for i, t in enumerate(_UpperCamelCase ):
# expand the latents if we are doing classifier free guidance
lowerCAmelCase__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
lowerCAmelCase__ = self.scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
# predict the noise residual
lowerCAmelCase__ = self.unet(_UpperCamelCase , _UpperCamelCase , encoder_hidden_states=_UpperCamelCase ).sample
# perform classifier free guidance
if do_classifier_free_guidance:
lowerCAmelCase__ , lowerCAmelCase__ = noise_pred.chunk(2 )
lowerCAmelCase__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
lowerCAmelCase__ = (
text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
)
lowerCAmelCase__ , lowerCAmelCase__ = self.cond_fn(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , )
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase__ = self.scheduler.step(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
lowerCAmelCase__ = 1 / 0.1_82_15 * latents
lowerCAmelCase__ = self.vae.decode(_UpperCamelCase ).sample
lowerCAmelCase__ = (image / 2 + 0.5).clamp(0 , 1 )
lowerCAmelCase__ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCAmelCase__ = self.numpy_to_pil(_UpperCamelCase )
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=_UpperCamelCase , nsfw_content_detected=_UpperCamelCase )
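
# ----------------------------------------------------------------------------------------------
# Illustrative usage sketch (assumptions: `pipe` is an already-assembled instance of the
# pipeline class above, loaded with Stable Diffusion weights, a matching CLIP model and,
# optionally, open-CLIP CoCa components; the image paths, prompts, keyword-argument names and
# the order of the two positional images follow the upstream community pipeline and are
# placeholders rather than guarantees of this file alone).
def _run_images_mixing_sketch(pipe):
    import torch
    from PIL import Image

    style_image = Image.open("style.jpg").convert("RGB")      # placeholder path
    content_image = Image.open("content.jpg").convert("RGB")  # placeholder path
    generator = torch.Generator(device=pipe.device).manual_seed(0)
    output = pipe(
        style_image,
        content_image,
        # Passing explicit prompts skips the CoCa captioning branch in __call__.
        style_prompt="an oil painting",
        content_prompt="a photo of a house",
        num_inference_steps=50,
        clip_guidance_scale=100,
        generator=generator,
    )
    return output.images[0]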
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
__snake_case : str = (7_20, 12_80) # Height, Width
__snake_case : Dict = (0.4, 0.6) # if height or width is lower than this scale, drop it.
__snake_case : Dict = 1 / 1_00
__snake_case : Optional[int] = """"""
__snake_case : Union[str, Any] = """"""
__snake_case : List[str] = """"""
__snake_case : List[str] = 2_50
def _UpperCamelCase ( ) -> None:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = get_dataset(UpperCamelCase_ , UpperCamelCase_ )
for index in range(UpperCamelCase_ ):
lowerCAmelCase__ = random.sample(range(len(UpperCamelCase_ ) ) , 4 )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = update_image_and_anno(
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , filter_scale=UpperCamelCase_ , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
lowerCAmelCase__ = random_chars(32 )
lowerCAmelCase__ = path.split(os.sep )[-1].rsplit('.' , 1 )[0]
lowerCAmelCase__ = F"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
cva.imwrite(F"{file_root}.jpg" , UpperCamelCase_ , [cva.IMWRITE_JPEG_QUALITY, 85] )
print(F"Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}" )
lowerCAmelCase__ = []
for anno in new_annos:
lowerCAmelCase__ = anno[3] - anno[1]
lowerCAmelCase__ = anno[4] - anno[2]
lowerCAmelCase__ = anno[1] + width / 2
lowerCAmelCase__ = anno[2] + height / 2
lowerCAmelCase__ = F"{anno[0]} {x_center} {y_center} {width} {height}"
annos_list.append(UpperCamelCase_ )
with open(F"{file_root}.txt" , 'w' ) as outfile:
outfile.write('\n'.join(line for line in annos_list ) )
def _UpperCamelCase ( UpperCamelCase_ : str , UpperCamelCase_ : str ) -> tuple[list, list]:
"""simple docstring"""
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for label_file in glob.glob(os.path.join(UpperCamelCase_ , '*.txt' ) ):
lowerCAmelCase__ = label_file.split(os.sep )[-1].rsplit('.' , 1 )[0]
with open(UpperCamelCase_ ) as in_file:
lowerCAmelCase__ = in_file.readlines()
lowerCAmelCase__ = os.path.join(UpperCamelCase_ , F"{label_name}.jpg" )
lowerCAmelCase__ = []
for obj_list in obj_lists:
lowerCAmelCase__ = obj_list.rstrip('\n' ).split(' ' )
lowerCAmelCase__ = float(obj[1] ) - float(obj[3] ) / 2
lowerCAmelCase__ = float(obj[2] ) - float(obj[4] ) / 2
lowerCAmelCase__ = float(obj[1] ) + float(obj[3] ) / 2
lowerCAmelCase__ = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(UpperCamelCase_ )
labels.append(UpperCamelCase_ )
return img_paths, labels
def _UpperCamelCase ( UpperCamelCase_ : list , UpperCamelCase_ : list , UpperCamelCase_ : list[int] , UpperCamelCase_ : tuple[int, int] , UpperCamelCase_ : tuple[float, float] , UpperCamelCase_ : float = 0.0 , ) -> tuple[list, list, str]:
"""simple docstring"""
lowerCAmelCase__ = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
lowerCAmelCase__ = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowerCAmelCase__ = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
lowerCAmelCase__ = int(scale_x * output_size[1] )
lowerCAmelCase__ = int(scale_y * output_size[0] )
lowerCAmelCase__ = []
lowerCAmelCase__ = []
for i, index in enumerate(UpperCamelCase_ ):
lowerCAmelCase__ = all_img_list[index]
path_list.append(UpperCamelCase_ )
lowerCAmelCase__ = all_annos[index]
lowerCAmelCase__ = cva.imread(UpperCamelCase_ )
if i == 0: # top-left
lowerCAmelCase__ = cva.resize(UpperCamelCase_ , (divid_point_x, divid_point_y) )
lowerCAmelCase__ = img
for bbox in img_annos:
lowerCAmelCase__ = bbox[1] * scale_x
lowerCAmelCase__ = bbox[2] * scale_y
lowerCAmelCase__ = bbox[3] * scale_x
lowerCAmelCase__ = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
lowerCAmelCase__ = cva.resize(UpperCamelCase_ , (output_size[1] - divid_point_x, divid_point_y) )
lowerCAmelCase__ = img
for bbox in img_annos:
lowerCAmelCase__ = scale_x + bbox[1] * (1 - scale_x)
lowerCAmelCase__ = bbox[2] * scale_y
lowerCAmelCase__ = scale_x + bbox[3] * (1 - scale_x)
lowerCAmelCase__ = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
lowerCAmelCase__ = cva.resize(UpperCamelCase_ , (divid_point_x, output_size[0] - divid_point_y) )
lowerCAmelCase__ = img
for bbox in img_annos:
lowerCAmelCase__ = bbox[1] * scale_x
lowerCAmelCase__ = scale_y + bbox[2] * (1 - scale_y)
lowerCAmelCase__ = bbox[3] * scale_x
lowerCAmelCase__ = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
lowerCAmelCase__ = cva.resize(
UpperCamelCase_ , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
lowerCAmelCase__ = img
for bbox in img_annos:
lowerCAmelCase__ = scale_x + bbox[1] * (1 - scale_x)
lowerCAmelCase__ = scale_y + bbox[2] * (1 - scale_y)
lowerCAmelCase__ = scale_x + bbox[3] * (1 - scale_x)
lowerCAmelCase__ = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
lowerCAmelCase__ = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def _UpperCamelCase ( UpperCamelCase_ : int ) -> str:
"""simple docstring"""
    assert number_char > 1, "The number of characters should be greater than 1"
lowerCAmelCase__ = ascii_lowercase + digits
return "".join(random.choice(UpperCamelCase_ ) for _ in range(UpperCamelCase_ ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
__UpperCamelCase : List[Any] = {
'susnato/ernie-m-base_pytorch': 'https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json',
'susnato/ernie-m-large_pytorch': 'https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json',
}
class _UpperCamelCase ( A ):
'''simple docstring'''
a_ : int = "ernie_m"
a_ : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self : Optional[Any] , _lowerCamelCase : int = 2_5_0_0_0_2 , _lowerCamelCase : int = 7_6_8 , _lowerCamelCase : int = 1_2 , _lowerCamelCase : int = 1_2 , _lowerCamelCase : int = 3_0_7_2 , _lowerCamelCase : str = "gelu" , _lowerCamelCase : float = 0.1 , _lowerCamelCase : float = 0.1 , _lowerCamelCase : int = 5_1_4 , _lowerCamelCase : float = 0.02 , _lowerCamelCase : int = 1 , _lowerCamelCase : float = 1E-05 , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : str=False , _lowerCamelCase : Tuple=0.0 , **_lowerCamelCase : Any , ):
'''simple docstring'''
super().__init__(pad_token_id=_lowerCamelCase , **_lowerCamelCase )
__lowerCamelCase : Optional[int] = vocab_size
__lowerCamelCase : List[str] = hidden_size
__lowerCamelCase : Any = num_hidden_layers
__lowerCamelCase : Optional[int] = num_attention_heads
__lowerCamelCase : Tuple = intermediate_size
__lowerCamelCase : Any = hidden_act
__lowerCamelCase : int = hidden_dropout_prob
__lowerCamelCase : List[Any] = attention_probs_dropout_prob
__lowerCamelCase : List[Any] = max_position_embeddings
__lowerCamelCase : Optional[Any] = initializer_range
__lowerCamelCase : Tuple = layer_norm_eps
__lowerCamelCase : List[str] = classifier_dropout
__lowerCamelCase : str = is_decoder
__lowerCamelCase : Optional[Any] = act_dropout
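
# ----------------------------------------------------------------------------------------------
# Illustrative sketch (not the transformers implementation): the string mapping declared on the
# config class above follows the `attribute_map` pattern of PretrainedConfig, where reading a
# legacy attribute name ("dropout", "num_classes") is transparently redirected to the canonical
# one ("classifier_dropout", "num_labels"). A minimal standalone version of that redirection:
class _AttributeMapSketch:
    attribute_map = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(self):
        self.classifier_dropout = 0.0
        self.num_labels = 2

    def __getattr__(self, name):
        # Only called when normal attribute lookup fails, i.e. for the legacy aliases.
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)

# _AttributeMapSketch().dropout returns 0.0 and .num_classes returns 2, mirroring how a
# "dropout" kwarg or attribute resolves to "classifier_dropout" on the ERNIE-M config.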
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class _UpperCamelCase ( tf.keras.optimizers.schedules.LearningRateSchedule ):
'''simple docstring'''
def __init__( self : Optional[Any] , _lowerCamelCase : float , _lowerCamelCase : Callable , _lowerCamelCase : int , _lowerCamelCase : float = 1.0 , _lowerCamelCase : str = None , ):
'''simple docstring'''
super().__init__()
__lowerCamelCase : Dict = initial_learning_rate
__lowerCamelCase : Any = warmup_steps
__lowerCamelCase : Optional[int] = power
__lowerCamelCase : str = decay_schedule_fn
__lowerCamelCase : Union[str, Any] = name
def __call__( self : List[str] , _lowerCamelCase : int ):
'''simple docstring'''
with tf.name_scope(self.name or """WarmUp""" ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
__lowerCamelCase : Dict = tf.cast(_lowerCamelCase , tf.floataa )
__lowerCamelCase : Optional[Any] = tf.cast(self.warmup_steps , tf.floataa )
__lowerCamelCase : List[str] = global_step_float / warmup_steps_float
__lowerCamelCase : List[str] = self.initial_learning_rate * tf.math.pow(_lowerCamelCase , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=_lowerCamelCase , )
def _snake_case ( self : Any ):
'''simple docstring'''
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def _UpperCAmelCase ( UpperCAmelCase : float , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : float = 0.0 , UpperCAmelCase : float = 0.9 , UpperCAmelCase : float = 0.9_9_9 , UpperCAmelCase : float = 1e-8 , UpperCAmelCase : Optional[float] = None , UpperCAmelCase : Optional[float] = None , UpperCAmelCase : float = 0.0 , UpperCAmelCase : float = 1.0 , UpperCAmelCase : Optional[List[str]] = None , ):
"""simple docstring"""
__lowerCamelCase : str = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=UpperCAmelCase , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=UpperCAmelCase , )
if num_warmup_steps:
__lowerCamelCase : str = WarmUp(
initial_learning_rate=UpperCAmelCase , decay_schedule_fn=UpperCAmelCase , warmup_steps=UpperCAmelCase , )
if weight_decay_rate > 0.0:
__lowerCamelCase : List[Any] = AdamWeightDecay(
learning_rate=UpperCAmelCase , weight_decay_rate=UpperCAmelCase , beta_a=UpperCAmelCase , beta_a=UpperCAmelCase , epsilon=UpperCAmelCase , clipnorm=UpperCAmelCase , global_clipnorm=UpperCAmelCase , exclude_from_weight_decay=["""LayerNorm""", """layer_norm""", """bias"""] , include_in_weight_decay=UpperCAmelCase , )
else:
__lowerCamelCase : Optional[int] = tf.keras.optimizers.Adam(
learning_rate=UpperCAmelCase , beta_a=UpperCAmelCase , beta_a=UpperCAmelCase , epsilon=UpperCAmelCase , clipnorm=UpperCAmelCase , global_clipnorm=UpperCAmelCase , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class _UpperCamelCase ( A ):
'''simple docstring'''
def __init__( self : Tuple , _lowerCamelCase : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , _lowerCamelCase : float = 0.9 , _lowerCamelCase : float = 0.999 , _lowerCamelCase : float = 1E-7 , _lowerCamelCase : bool = False , _lowerCamelCase : float = 0.0 , _lowerCamelCase : Optional[List[str]] = None , _lowerCamelCase : Optional[List[str]] = None , _lowerCamelCase : str = "AdamWeightDecay" , **_lowerCamelCase : Union[str, Any] , ):
'''simple docstring'''
super().__init__(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
__lowerCamelCase : Union[str, Any] = weight_decay_rate
__lowerCamelCase : Tuple = include_in_weight_decay
__lowerCamelCase : Optional[Any] = exclude_from_weight_decay
@classmethod
def _snake_case ( cls : Union[str, Any] , _lowerCamelCase : List[Any] ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = {"""WarmUp""": WarmUp}
return super(_lowerCamelCase , cls ).from_config(_lowerCamelCase , custom_objects=_lowerCamelCase )
def _snake_case ( self : str , _lowerCamelCase : Tuple , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
super(_lowerCamelCase , self )._prepare_local(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__lowerCamelCase : Optional[Any] = tf.constant(
self.weight_decay_rate , name="""adam_weight_decay_rate""" )
def _snake_case ( self : Union[str, Any] , _lowerCamelCase : List[Any] , _lowerCamelCase : Tuple , _lowerCamelCase : Tuple ):
'''simple docstring'''
__lowerCamelCase : Optional[Any] = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]["""weight_decay_rate"""] , use_locking=self._use_locking , )
return tf.no_op()
def _snake_case ( self : Tuple , _lowerCamelCase : Tuple , _lowerCamelCase : str=None , **_lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase : Union[str, Any] = list(zip(*_lowerCamelCase ) )
return super(_lowerCamelCase , self ).apply_gradients(zip(_lowerCamelCase , _lowerCamelCase ) , name=_lowerCamelCase , **_lowerCamelCase )
def _snake_case ( self : int , _lowerCamelCase : Optional[int] , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] ):
'''simple docstring'''
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
__lowerCamelCase : Union[str, Any] = apply_state or {}
__lowerCamelCase : Union[str, Any] = apply_state.get((var_device, var_dtype) )
if coefficients is None:
__lowerCamelCase : List[str] = self._fallback_apply_state(_lowerCamelCase , _lowerCamelCase )
__lowerCamelCase : Dict = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def _snake_case ( self : int , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Any , _lowerCamelCase : Dict=None ):
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase : Optional[Any] = self._get_lr(var.device , var.dtype.base_dtype , _lowerCamelCase )
__lowerCamelCase : Dict = self._decay_weights_op(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
with tf.control_dependencies([decay] ):
return super(_lowerCamelCase , self )._resource_apply_dense(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
def _snake_case ( self : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : Tuple , _lowerCamelCase : Tuple=None ):
'''simple docstring'''
__lowerCamelCase , __lowerCamelCase : int = self._get_lr(var.device , var.dtype.base_dtype , _lowerCamelCase )
__lowerCamelCase : str = self._decay_weights_op(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
with tf.control_dependencies([decay] ):
return super(_lowerCamelCase , self )._resource_apply_sparse(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
def _snake_case ( self : int ):
'''simple docstring'''
__lowerCamelCase : int = super().get_config()
config.update({"""weight_decay_rate""": self.weight_decay_rate} )
return config
def _snake_case ( self : Dict , _lowerCamelCase : List[str] ):
'''simple docstring'''
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(_lowerCamelCase , _lowerCamelCase ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(_lowerCamelCase , _lowerCamelCase ) is not None:
return False
return True
class _UpperCamelCase ( A ):
'''simple docstring'''
def __init__( self : Union[str, Any] ):
'''simple docstring'''
__lowerCamelCase : Optional[int] = []
__lowerCamelCase : int = None
@property
def _snake_case ( self : Tuple ):
'''simple docstring'''
if self._accum_steps is None:
__lowerCamelCase : Any = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=_lowerCamelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def _snake_case ( self : Any ):
'''simple docstring'''
if not self._gradients:
raise ValueError("""The accumulator should be called first to initialize the gradients""" )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : List[str] , _lowerCamelCase : List[str] ):
'''simple docstring'''
if not self._gradients:
__lowerCamelCase : List[str] = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(_lowerCamelCase ) , trainable=_lowerCamelCase , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(_lowerCamelCase ) != len(self._gradients ):
raise ValueError(F"""Expected {len(self._gradients )} gradients, but got {len(_lowerCamelCase )}""" )
for accum_gradient, gradient in zip(self._gradients , _lowerCamelCase ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(_lowerCamelCase )
self._accum_steps.assign_add(1 )
def _snake_case ( self : Any ):
'''simple docstring'''
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(_lowerCamelCase ) )
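
# ----------------------------------------------------------------------------------------------
# Illustrative usage sketch (assumption: the schedule and optimizer classes defined above are
# exposed as `WarmUp` and `AdamWeightDecay`, which is how the helper function in this file
# already refers to them; the step counts and hyper-parameters below are example values only).
def _build_warmup_adamw_sketch(num_train_steps=10_000, num_warmup_steps=1_000, init_lr=5e-5):
    decay_schedule = tf.keras.optimizers.schedules.PolynomialDecay(
        initial_learning_rate=init_lr,
        decay_steps=num_train_steps - num_warmup_steps,
        end_learning_rate=0.0,
    )
    lr_schedule = WarmUp(
        initial_learning_rate=init_lr,
        decay_schedule_fn=decay_schedule,
        warmup_steps=num_warmup_steps,
    )
    optimizer = AdamWeightDecay(
        learning_rate=lr_schedule,
        weight_decay_rate=0.01,
        exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"],
    )
    return optimizer, lr_schedule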
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class lowerCAmelCase :
@staticmethod
def _A ( *a__ : Optional[int] , **a__ : Any ):
'''simple docstring'''
pass
def UpperCAmelCase_ ( lowerCamelCase_ ):
"""simple docstring"""
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
snake_case = (
"""https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"""
)
@is_pipeline_test
@require_torch
@require_vision
class lowerCAmelCase ( unittest.TestCase ):
A_ : int = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def _A ( self : Any , a__ : Dict , a__ : int , a__ : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = pipeline(
"document-question-answering" , model=a__ , tokenizer=a__ , image_processor=a__ )
lowerCAmelCase__ : Optional[int] = INVOICE_URL
lowerCAmelCase__ : List[Any] = list(zip(*apply_tesseract(load_image(a__ ) , a__ , "" ) ) )
lowerCAmelCase__ : int = "What is the placebo?"
lowerCAmelCase__ : Optional[int] = [
{
"image": load_image(a__ ),
"question": question,
},
{
"image": image,
"question": question,
},
{
"image": image,
"question": question,
"word_boxes": word_boxes,
},
]
return dqa_pipeline, examples
def _A ( self : Dict , a__ : Tuple , a__ : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = dqa_pipeline(a__ , top_k=2 )
self.assertEqual(
a__ , [
[
{"score": ANY(a__ ), "answer": ANY(a__ ), "start": ANY(a__ ), "end": ANY(a__ )},
{"score": ANY(a__ ), "answer": ANY(a__ ), "start": ANY(a__ ), "end": ANY(a__ )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def _A ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" )
lowerCAmelCase__ : Optional[int] = INVOICE_URL
lowerCAmelCase__ : Tuple = "How many cats are there?"
lowerCAmelCase__ : str = [
{"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
{"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
]
lowerCAmelCase__ : Tuple = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , a__ )
lowerCAmelCase__ : Union[str, Any] = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , a__ )
        # No text is detected in this image, so layoutlmv2 should fail
        # and will probably return an empty answer.
lowerCAmelCase__ : Union[str, Any] = "./tests/fixtures/tests_samples/COCO/000000039769.png"
lowerCAmelCase__ : Optional[Any] = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(a__ , [] )
        # We can optionally pass the words and bounding boxes directly
lowerCAmelCase__ : Optional[Any] = "./tests/fixtures/tests_samples/COCO/000000039769.png"
lowerCAmelCase__ : Union[str, Any] = []
lowerCAmelCase__ : List[str] = []
lowerCAmelCase__ : int = dqa_pipeline(image=a__ , question=a__ , words=a__ , boxes=a__ , top_k=2 )
self.assertEqual(a__ , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _A ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , )
lowerCAmelCase__ : Any = INVOICE_URL
lowerCAmelCase__ : int = "What is the invoice number?"
lowerCAmelCase__ : List[Any] = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCAmelCase__ : Union[str, Any] = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCAmelCase__ : int = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def _A ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = pipeline(
"document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , )
lowerCAmelCase__ : List[Any] = INVOICE_URL
lowerCAmelCase__ : Optional[Any] = "What is the invoice number?"
lowerCAmelCase__ : Any = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCAmelCase__ : Any = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCAmelCase__ : str = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
{"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _A ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Any = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=a__ )
lowerCAmelCase__ : Tuple = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=a__ , revision="3dc6de3" , )
lowerCAmelCase__ : Optional[Any] = INVOICE_URL
lowerCAmelCase__ : int = "What is the invoice number?"
lowerCAmelCase__ : Any = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
] , )
lowerCAmelCase__ : List[Any] = dqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
] , )
lowerCAmelCase__ : Union[str, Any] = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
]
]
* 2 , )
lowerCAmelCase__ : Optional[int] = list(zip(*apply_tesseract(load_image(a__ ) , a__ , "" ) ) )
# This model should also work if `image` is set to None
lowerCAmelCase__ : Any = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def _A ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : str = AutoTokenizer.from_pretrained(
"impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=a__ )
lowerCAmelCase__ : Optional[int] = pipeline(
"document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=a__ , revision="3dc6de3" , max_seq_len=50 , )
lowerCAmelCase__ : List[Any] = INVOICE_URL
lowerCAmelCase__ : int = "What is the invoice number?"
lowerCAmelCase__ : Optional[Any] = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
] , )
lowerCAmelCase__ : List[Any] = dqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
[
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
]
]
* 2 , )
lowerCAmelCase__ : Dict = list(zip(*apply_tesseract(load_image(a__ ) , a__ , "" ) ) )
# This model should also work if `image` is set to None
lowerCAmelCase__ : int = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(a__ , decimals=4 ) , [
{"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
{"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
] , )
@slow
@require_torch
def _A ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = pipeline(
"document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
lowerCAmelCase__ : Optional[int] = INVOICE_URL
lowerCAmelCase__ : List[Any] = "What is the invoice number?"
lowerCAmelCase__ : Optional[Any] = dqa_pipeline(image=a__ , question=a__ , top_k=2 )
self.assertEqual(nested_simplify(a__ , decimals=4 ) , [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
def _A ( self : int ):
'''simple docstring'''
pass
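# Hedged usage sketch (added, not part of the original tests): how the
# "document-question-answering" pipeline exercised above is typically invoked.
# The image path below is an illustrative assumption.
def _example_dqa_usage():
    from transformers import pipeline

    dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
    # Returns a list of dicts with "score", "answer", "start" and "end" keys,
    # matching the structure asserted in the tests above.
    return dqa(image="invoice.png", question="What is the invoice number?", top_k=2)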
| 568 |
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase ( UpperCamelCase_ , unittest.TestCase ):
A_ : Optional[Any] = LayoutLMTokenizer
A_ : Union[str, Any] = LayoutLMTokenizerFast
A_ : int = True
A_ : str = True
def _A ( self : Union[str, Any] ):
'''simple docstring'''
super().setUp()
lowerCAmelCase__ : Any = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
lowerCAmelCase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def _A ( self : int , **a__ : Optional[Any] ):
'''simple docstring'''
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **a__ )
def _A ( self : Optional[Any] , a__ : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Any = "UNwant\u00E9d,running"
lowerCAmelCase__ : Dict = "unwanted, running"
return input_text, output_text
def _A ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.tokenizer_class(self.vocab_file )
lowerCAmelCase__ : Tuple = tokenizer.tokenize("UNwant\u00E9d,running" )
self.assertListEqual(a__ , ["un", "##want", "##ed", ",", "runn", "##ing"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(a__ ) , [7, 4, 5, 10, 8, 9] )
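# Note (added): the expected ids above are simply the positions of each sub-token
# in the toy `vocab_tokens` list written in `setUp` (e.g. "un" is at index 7,
# "##want" at index 4, "," at index 10).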
def _A ( self : Optional[Any] ):
'''simple docstring'''
pass
| 568 | 1 |
'''simple docstring'''
def solution(limit: int = 1_0_0_0) -> int:
    """Return the sum of all multiples of 3 or 5 below ``limit``."""
    return sum(e for e in range(3, limit) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 72 |
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class A ( UpperCamelCase_ ):
def __init__( self : str ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Optional[Any] =[]
def lowerCamelCase ( self : Dict , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[int] , **lowercase_ : List[Any] ) -> List[Any]:
"""simple docstring"""
self.events.append('on_init_end' )
def lowerCamelCase ( self : int , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : Optional[Any] , **lowercase_ : Any ) -> Union[str, Any]:
"""simple docstring"""
self.events.append('on_train_begin' )
def lowerCamelCase ( self : Optional[int] , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Tuple , **lowercase_ : str ) -> Optional[Any]:
"""simple docstring"""
self.events.append('on_train_end' )
def lowerCamelCase ( self : Optional[Any] , lowercase_ : str , lowercase_ : Dict , lowercase_ : int , **lowercase_ : List[str] ) -> Dict:
"""simple docstring"""
self.events.append('on_epoch_begin' )
def lowerCamelCase ( self : Dict , lowercase_ : Any , lowercase_ : Dict , lowercase_ : List[Any] , **lowercase_ : Dict ) -> Optional[Any]:
"""simple docstring"""
self.events.append('on_epoch_end' )
def lowerCamelCase ( self : List[str] , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Any , **lowercase_ : Dict ) -> Tuple:
"""simple docstring"""
self.events.append('on_step_begin' )
def lowerCamelCase ( self : List[str] , lowercase_ : Dict , lowercase_ : str , lowercase_ : Optional[Any] , **lowercase_ : Optional[int] ) -> Tuple:
"""simple docstring"""
self.events.append('on_step_end' )
def lowerCamelCase ( self : Tuple , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , **lowercase_ : List[Any] ) -> Any:
"""simple docstring"""
self.events.append('on_evaluate' )
def lowerCamelCase ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : List[str] , **lowercase_ : Dict ) -> Dict:
"""simple docstring"""
self.events.append('on_predict' )
def lowerCamelCase ( self : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : List[Any] , **lowercase_ : Optional[int] ) -> Any:
"""simple docstring"""
self.events.append('on_save' )
def lowerCamelCase ( self : str , lowercase_ : List[Any] , lowercase_ : Any , lowercase_ : Optional[int] , **lowercase_ : Optional[int] ) -> List[str]:
"""simple docstring"""
self.events.append('on_log' )
def lowerCamelCase ( self : Any , lowercase_ : List[Any] , lowercase_ : int , lowercase_ : List[Any] , **lowercase_ : str ) -> List[Any]:
"""simple docstring"""
self.events.append('on_prediction_step' )
@require_torch
class A ( unittest.TestCase ):
def lowerCamelCase ( self : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
_lowerCamelCase : Dict =tempfile.mkdtemp()
def lowerCamelCase ( self : Union[str, Any] ) -> int:
"""simple docstring"""
shutil.rmtree(self.output_dir )
def lowerCamelCase ( self : Optional[Any] , lowercase_ : Optional[int]=0 , lowercase_ : int=0 , lowercase_ : Dict=64 , lowercase_ : List[str]=64 , lowercase_ : int=None , lowercase_ : Tuple=False , **lowercase_ : Dict ) -> Optional[int]:
"""simple docstring"""
_lowerCamelCase : int =RegressionDataset(length=lowercase_ )
_lowerCamelCase : Optional[Any] =RegressionDataset(length=lowercase_ )
_lowerCamelCase : str =RegressionModelConfig(a=lowercase_ , b=lowercase_ )
_lowerCamelCase : Optional[Any] =RegressionPreTrainedModel(lowercase_ )
_lowerCamelCase : str =TrainingArguments(self.output_dir , disable_tqdm=lowercase_ , report_to=[] , **lowercase_ )
return Trainer(
lowercase_ , lowercase_ , train_dataset=lowercase_ , eval_dataset=lowercase_ , callbacks=lowercase_ , )
def lowerCamelCase ( self : List[str] , lowercase_ : int , lowercase_ : int ) -> str:
"""simple docstring"""
self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
# Order doesn't matter
_lowerCamelCase : Any =sorted(lowercase_ , key=lambda lowercase_ : cb.__name__ if isinstance(lowercase_ , lowercase_ ) else cb.__class__.__name__ )
_lowerCamelCase : Any =sorted(lowercase_ , key=lambda lowercase_ : cb.__name__ if isinstance(lowercase_ , lowercase_ ) else cb.__class__.__name__ )
for cba, cba in zip(lowercase_ , lowercase_ ):
if isinstance(lowercase_ , lowercase_ ) and isinstance(lowercase_ , lowercase_ ):
self.assertEqual(lowercase_ , lowercase_ )
elif isinstance(lowercase_ , lowercase_ ) and not isinstance(lowercase_ , lowercase_ ):
self.assertEqual(lowercase_ , cba.__class__ )
elif not isinstance(lowercase_ , lowercase_ ) and isinstance(lowercase_ , lowercase_ ):
self.assertEqual(cba.__class__ , lowercase_ )
else:
self.assertEqual(lowercase_ , lowercase_ )
def lowerCamelCase ( self : Dict , lowercase_ : List[str] ) -> str:
"""simple docstring"""
_lowerCamelCase : List[Any] =['on_init_end', 'on_train_begin']
_lowerCamelCase : Dict =0
_lowerCamelCase : str =len(trainer.get_eval_dataloader() )
_lowerCamelCase : List[Any] =['on_prediction_step'] * len(trainer.get_eval_dataloader() ) + ['on_log', 'on_evaluate']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('on_epoch_begin' )
for _ in range(lowercase_ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('on_log' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('on_save' )
expected_events.append('on_epoch_end' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def lowerCamelCase ( self : Optional[Any] ) -> List[str]:
"""simple docstring"""
_lowerCamelCase : Any =self.get_trainer()
_lowerCamelCase : Optional[Any] =DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
# Callbacks passed at init are added to the default callbacks
_lowerCamelCase : Dict =self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
# TrainingArguments.disable_tqdm controls whether ProgressCallback or PrinterCallback is used
_lowerCamelCase : List[str] =self.get_trainer(disable_tqdm=lowercase_ )
_lowerCamelCase : Tuple =DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
def lowerCamelCase ( self : str ) -> Optional[Any]:
"""simple docstring"""
_lowerCamelCase : int =DEFAULT_CALLBACKS.copy() + [ProgressCallback]
_lowerCamelCase : List[str] =self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(lowercase_ )
expected_callbacks.remove(lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
_lowerCamelCase : Tuple =self.get_trainer()
_lowerCamelCase : Dict =trainer.pop_callback(lowercase_ )
self.assertEqual(cb.__class__ , lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
trainer.add_callback(lowercase_ )
expected_callbacks.insert(0 , lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
# We can also add, pop, or remove by instance
_lowerCamelCase : Optional[int] =self.get_trainer()
_lowerCamelCase : Any =trainer.callback_handler.callbacks[0]
trainer.remove_callback(lowercase_ )
expected_callbacks.remove(lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
_lowerCamelCase : Any =self.get_trainer()
_lowerCamelCase : Optional[int] =trainer.callback_handler.callbacks[0]
_lowerCamelCase : Dict =trainer.pop_callback(lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
trainer.add_callback(lowercase_ )
expected_callbacks.insert(0 , lowercase_ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , lowercase_ )
def lowerCamelCase ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
import warnings
# XXX: for now ignore scatter_gather warnings in this test since they are not relevant to what's being tested
warnings.simplefilter(action='ignore' , category=lowercase_ )
_lowerCamelCase : str =self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
_lowerCamelCase : List[Any] =trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
# Independent log/save/eval
_lowerCamelCase : str =self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
_lowerCamelCase : Union[str, Any] =trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
_lowerCamelCase : int =self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
_lowerCamelCase : Optional[Any] =trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
_lowerCamelCase : str =self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='steps' )
trainer.train()
_lowerCamelCase : Any =trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
_lowerCamelCase : Optional[int] =self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='epoch' )
trainer.train()
_lowerCamelCase : Optional[int] =trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
# A bit of everything
_lowerCamelCase : Optional[Any] =self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='steps' , )
trainer.train()
_lowerCamelCase : List[str] =trainer.callback_handler.callbacks[-2].events
self.assertEqual(lowercase_ , self.get_expected_events(lowercase_ ) )
# warning should be emitted for duplicated callbacks
with patch('transformers.trainer_callback.logger.warning' ) as warn_mock:
_lowerCamelCase : Union[str, Any] =self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(lowercase_ ) in warn_mock.call_args[0][0]
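# Hedged sketch (added, not part of the original tests): the events checked above
# follow the `TrainerCallback` hook protocol.  A minimal custom callback that reacts
# to evaluation results could look like this; it would be registered either via
# `Trainer(..., callbacks=[_EvalLoggerCallback])` or `trainer.add_callback(...)`.
from transformers import TrainerCallback


class _EvalLoggerCallback(TrainerCallback):
    def on_evaluate(self, args, state, control, metrics=None, **kwargs):
        # `metrics` carries the evaluation metrics computed at the current step.
        print(f"step {state.global_step}: {metrics}")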
| 464 | 0 |
'''simple docstring'''
def solution(limit: int = 1000) -> int:
    """Return the sum of 2 * a * ((a - 1) // 2) for a in the range 3..limit."""
    return sum(2 * a * ((a - 1) // 2) for a in range(3, limit + 1))
if __name__ == "__main__":
print(solution())
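# Hedged cross-check (added, not part of the original solution): the closed form
# 2 * a * ((a - 1) // 2) appears to be the maximum of ((a - 1)**n + (a + 1)**n) % a**2
# over n (a square-remainder identity); the brute force below can verify that for
# small `a`.  This helper is an illustrative assumption, not part of the original code.
def _brute_force_r_max(a: int) -> int:
    return max(((a - 1) ** n + (a + 1) ** n) % (a * a) for n in range(1, 2 * a + 1))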
| 119 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__SCREAMING_SNAKE_CASE :List[str] = {
'''configuration_swiftformer''': [
'''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SwiftFormerConfig''',
'''SwiftFormerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__SCREAMING_SNAKE_CASE :List[Any] = [
'''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwiftFormerForImageClassification''',
'''SwiftFormerModel''',
'''SwiftFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
__SCREAMING_SNAKE_CASE :List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
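# Hedged illustration (added): `_LazyModule` defers the heavy model imports above until
# an attribute is first requested.  A generic, minimal sketch of the same idea; the
# module path below is an illustrative assumption.
def _lazy_getattr_sketch(name: str):
    import importlib

    heavy = importlib.import_module("transformers.models.swiftformer.modeling_swiftformer")
    return getattr(heavy, name)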
| 119 | 1 |
cache: dict[tuple[int, int, int], int] = {}


def _calculate(days: int, absent: int, late: int) -> int:
    """Memoised count of the prize strings still possible with ``days`` days left,
    ``absent`` absences so far and a current run of ``late`` consecutive late days."""
    # if we are absent twice, or late 3 consecutive days,
    # no further prize strings are possible
    if late == 3 or absent == 2:
        return 0
    # if we have no days left, and have not failed any other rules,
    # we have a prize string
    if days == 0:
        return 1
    # No easy solution, so now we need to do the recursive calculation
    # First, check if the combination is already in the cache, and
    # if yes, return the stored value from there since we already
    # know the number of possible prize strings from this point on
    key = (days, absent, late)
    if key in cache:
        return cache[key]
    # now we calculate the three possible ways that can unfold from
    # this point on, depending on our attendance today
    # 1) if we are late (but not absent), the "absent" counter stays as
    # it is, but the "late" counter increases by one
    state_late = _calculate(days - 1, absent, late + 1)
    # 2) if we are absent, the "absent" counter increases by 1, and the
    # "late" counter resets to 0
    state_absent = _calculate(days - 1, absent + 1, 0)
    # 3) if we are on time, this resets the "late" counter and keeps the
    # absent counter
    state_ontime = _calculate(days - 1, absent, 0)
    prizestrings = state_late + state_absent + state_ontime
    cache[key] = prizestrings
    return prizestrings


def solution(days: int = 30) -> int:
    """Return the number of "prize strings" for a 30-day period."""
    return _calculate(days, absent=0, late=0)
if __name__ == "__main__":
print(solution())
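# Hedged cross-check (added, not part of the original solution): for small day counts
# the memoised recursion above can be verified against a direct enumeration of all
# attendance strings over {"O", "L", "A"} (on time, late, absent).  For example, both
# _brute_force(4) and _calculate(4, 0, 0) should give 43.
def _brute_force(days: int) -> int:
    from itertools import product

    count = 0
    for attendance in product("OLA", repeat=days):
        s = "".join(attendance)
        # A prize string has at most one absence and never three consecutive lates.
        if s.count("A") < 2 and "LLL" not in s:
            count += 1
    return count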
| 79 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def a ( UpperCamelCase_ : Any ) -> List[str]:
snake_case__ =os.path.join(args.tf_model_dir , 'parameters.json' )
snake_case__ =json.loads(open(UpperCamelCase_ ).read() )
if not params:
raise ValueError(
f"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith('.pt' ):
snake_case__ =args.output + '.pt'
snake_case__ =OrderedDict()
with tf.device('/CPU:0' ):
snake_case__ =tf.train.load_checkpoint(args.tf_model_dir )
snake_case__ =reader.get_variable_to_shape_map()
for key_name in shapes.keys():
snake_case__ =reader.get_tensor(UpperCamelCase_ ).astype(np.floataa )
if key_name.endswith('/adam_m' ) or key_name.endswith('/adam_v' ):
continue
if key_name.startswith('pasts/' ):
if key_name.startswith('pasts/mlp' ):
snake_case__ =int(key_name[9] )
elif key_name.startswith('pasts/out' ):
snake_case__ =8
snake_case__ ='model.sqout.%d.weight' % (player * 2) # fed into an nn.Sequential with Tanh, so two weights at a time
snake_case__ =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case__ =torch.tensor(UpperCamelCase_ )
elif key_name.startswith('model/moe' ):
snake_case__ =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/switch_gating/kernel' ):
snake_case__ ='model.blocks.%d.feed_forward.mlp.router.classifier.weight' % player
snake_case__ =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case__ =torch.tensor(UpperCamelCase_ )
elif key_name.endswith('/softmlp/kernel' ):
snake_case__ ='model.blocks.%d.feed_forward.soft_bypass_mlp.weight' % player
snake_case__ =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case__ =torch.tensor(UpperCamelCase_ )
elif key_name.endswith('/wo/kernel' ) or key_name.endswith('/wi/kernel' ):
snake_case__ =key_name[-9:-7]
for i in range(16 ):
snake_case__ ='model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight' % (player, i, nlayer)
snake_case__ =(
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
snake_case__ =torch.tensor(UpperCamelCase_ )
elif key_name.startswith('model/mlp' ):
snake_case__ =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/p1/kernel' ):
snake_case__ ='model.blocks.%d.feed_forward.mlp.wi.weight' % player
snake_case__ =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case__ =torch.tensor(UpperCamelCase_ )
elif key_name.endswith('/p1/bias' ):
snake_case__ ='model.blocks.%d.feed_forward.mlp.wi.bias' % player
snake_case__ =vnp.copy() # same because it is one dimensional
snake_case__ =torch.tensor(UpperCamelCase_ )
elif key_name.endswith('/p2/kernel' ):
snake_case__ ='model.blocks.%d.feed_forward.mlp.wo.weight' % player
snake_case__ =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case__ =torch.tensor(UpperCamelCase_ )
elif key_name.endswith('/p2/bias' ):
snake_case__ ='model.blocks.%d.feed_forward.mlp.wo.bias' % player
snake_case__ =vnp.copy() # same because it is one dimensional
snake_case__ =torch.tensor(UpperCamelCase_ )
elif key_name.startswith('model/ln' ):
snake_case__ =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
snake_case__ ='model.blocks.%d.feed_forward.norm.bias' % player
snake_case__ =vnp.copy() # same because it is one dimensional
snake_case__ =torch.tensor(UpperCamelCase_ )
elif key_name.endswith('/g' ):
snake_case__ ='model.blocks.%d.feed_forward.norm.weight' % player
snake_case__ =vnp.copy() # same because it is one dimensional
snake_case__ =torch.tensor(UpperCamelCase_ )
elif key_name.startswith('model/att' ):
snake_case__ =int(key_name[9:].split('/' )[0] )
if key_name.endswith('/qkv/kernel' ):
snake_case__ =vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
snake_case__ =state[:, 0, :, :]
snake_case__ =state[:, 1, :, :]
snake_case__ =state[:, 2, :, :]
snake_case__ =(
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case__ =(
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case__ =(
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case__ ='model.blocks.%d.self_attn.self_attn.q_proj.weight' % player
snake_case__ =torch.tensor(UpperCamelCase_ )
snake_case__ ='model.blocks.%d.self_attn.self_attn.k_proj.weight' % player
snake_case__ =torch.tensor(UpperCamelCase_ )
snake_case__ ='model.blocks.%d.self_attn.self_attn.v_proj.weight' % player
snake_case__ =torch.tensor(UpperCamelCase_ )
elif key_name.endswith('/o/kernel' ):
snake_case__ ='model.blocks.%d.self_attn.self_attn.out_proj.weight' % player
snake_case__ =(
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
snake_case__ =torch.tensor(UpperCamelCase_ )
elif key_name.startswith('model/an' ):
snake_case__ =int(key_name[8:].split('/' )[0] )
if key_name.endswith('/b' ):
snake_case__ ='model.blocks.%d.self_attn.norm.bias' % player
snake_case__ =vnp.copy() # same because it is one dimensional
snake_case__ =torch.tensor(UpperCamelCase_ )
elif key_name.endswith('/g' ):
snake_case__ ='model.blocks.%d.self_attn.norm.weight' % player
snake_case__ =vnp.copy() # same because it is one dimensional
snake_case__ =torch.tensor(UpperCamelCase_ )
elif (
key_name.startswith('model/wte' )
or key_name.startswith('model/wpe' )
or key_name.startswith('model/ete' )
):
snake_case__ ={'wte': 'embed_tokens', 'wpe': 'position_embeddings', 'ete': 'extra_position_embeddings'}[
key_name[-3:]
]
snake_case__ ='model.%s.weight' % nlayer
snake_case__ =vnp.copy() # same in embedded
snake_case__ =torch.tensor(UpperCamelCase_ )
if key_name.startswith('model/wte' ):
snake_case__ ='lm_head.weight'
snake_case__ =vnp.copy() # same in embedded
snake_case__ =torch.tensor(UpperCamelCase_ )
elif key_name.startswith('model/wob' ):
snake_case__ ='final_logits_bias'
snake_case__ =vnp.copy() # same in embedded
snake_case__ =state.reshape((1, -1) )
snake_case__ =torch.tensor(UpperCamelCase_ )
elif key_name == "model/dense/kernel":
snake_case__ ='model.last_project.weight'
snake_case__ =vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
snake_case__ =torch.tensor(UpperCamelCase_ )
elif key_name == "model/dense_1/bias":
snake_case__ ='model.last_project.bias'
snake_case__ =vnp.copy() # same because it is one dimensional
snake_case__ =torch.tensor(UpperCamelCase_ )
torch.save(UpperCamelCase_ , args.output )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ : int = argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
SCREAMING_SNAKE_CASE__ : str = parser.parse_args()
convert_tf_gptsan_to_pt(args)
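# Hedged illustration (added, not part of the original script): the qkv branch above
# slices one fused kernel along its second axis into separate q/k/v projections and
# flattens each into an (out_features, in_features) Linear weight.  A tiny standalone
# demonstration with dummy, illustrative shapes:
def _demo_qkv_split():
    import numpy as np

    vnp = np.random.rand(8, 3, 2, 4).astype(np.float32)  # (hidden, qkv, heads, head_dim), illustrative
    q = vnp[:, 0, :, :]  # same slicing the converter uses for the query weights
    # Flatten (heads, head_dim) and transpose to (out_features, in_features),
    # the layout expected by torch.nn.Linear weight tensors.
    q = q.reshape(q.shape[0], -1).transpose(1, 0)
    return q.shape  # (heads * head_dim, hidden) == (8, 8)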
| 538 | 0 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class __snake_case ( __UpperCAmelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = BertTokenizer
lowerCAmelCase__ = BertTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = filter_non_english
def UpperCAmelCase__ ( self : Dict ):
super().setUp()
__snake_case: Optional[int] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
__snake_case: Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def UpperCAmelCase__ ( self : Union[str, Any] , A : Dict ):
__snake_case: List[str] = """UNwant\u00E9d,running"""
__snake_case: Dict = """unwanted, running"""
return input_text, output_text
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Dict = self.tokenizer_class(self.vocab_file )
__snake_case: Any = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(A , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [9, 6, 7, 12, 10, 11] )
def UpperCAmelCase__ ( self : Dict ):
if not self.test_rust_tokenizer:
return
__snake_case: Any = self.get_tokenizer()
__snake_case: Optional[Any] = self.get_rust_tokenizer()
__snake_case: Any = """UNwant\u00E9d,running"""
__snake_case: Dict = tokenizer.tokenize(A )
__snake_case: str = rust_tokenizer.tokenize(A )
self.assertListEqual(A , A )
__snake_case: Optional[int] = tokenizer.encode(A , add_special_tokens=A )
__snake_case: List[str] = rust_tokenizer.encode(A , add_special_tokens=A )
self.assertListEqual(A , A )
__snake_case: Optional[int] = self.get_rust_tokenizer()
__snake_case: Optional[int] = tokenizer.encode(A )
__snake_case: Optional[Any] = rust_tokenizer.encode(A )
self.assertListEqual(A , A )
# With lower casing
__snake_case: Optional[int] = self.get_tokenizer(do_lower_case=A )
__snake_case: int = self.get_rust_tokenizer(do_lower_case=A )
__snake_case: int = """UNwant\u00E9d,running"""
__snake_case: Optional[int] = tokenizer.tokenize(A )
__snake_case: Optional[int] = rust_tokenizer.tokenize(A )
self.assertListEqual(A , A )
__snake_case: Tuple = tokenizer.encode(A , add_special_tokens=A )
__snake_case: Optional[Any] = rust_tokenizer.encode(A , add_special_tokens=A )
self.assertListEqual(A , A )
__snake_case: Tuple = self.get_rust_tokenizer()
__snake_case: Tuple = tokenizer.encode(A )
__snake_case: Union[str, Any] = rust_tokenizer.encode(A )
self.assertListEqual(A , A )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def UpperCAmelCase__ ( self : str ):
__snake_case: List[str] = BasicTokenizer(do_lower_case=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: List[Any] = BasicTokenizer(do_lower_case=A , strip_accents=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: List[Any] = BasicTokenizer(do_lower_case=A , strip_accents=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCAmelCase__ ( self : int ):
__snake_case: str = BasicTokenizer(do_lower_case=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Optional[int] = BasicTokenizer(do_lower_case=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCAmelCase__ ( self : Any ):
__snake_case: str = BasicTokenizer(do_lower_case=A , strip_accents=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: List[Any] = BasicTokenizer(do_lower_case=A , strip_accents=A )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[int] = BasicTokenizer(do_lower_case=A , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: List[Any] = BasicTokenizer()
__snake_case: Any = """a\n\'ll !!to?\'d of, can\'t."""
__snake_case: List[str] = ["""a""", """\'""", """ll""", """!""", """!""", """to""", """?""", """\'""", """d""", """of""", """,""", """can""", """\'""", """t""", """."""]
self.assertListEqual(tokenizer.tokenize(A ) , A )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Any = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
__snake_case: Any = {}
for i, token in enumerate(A ):
__snake_case: List[str] = i
__snake_case: List[Any] = WordpieceTokenizer(vocab=A , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def UpperCAmelCase__ ( self : Union[str, Any] ):
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def UpperCAmelCase__ ( self : Optional[Any] ):
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def UpperCAmelCase__ ( self : List[str] ):
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: str = self.get_tokenizer()
__snake_case: str = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(A ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(A ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def UpperCAmelCase__ ( self : int ):
__snake_case: int = self.tokenizer_class.from_pretrained("""bert-base-uncased""" )
__snake_case: List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=A )
__snake_case: List[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A )
__snake_case: Tuple = tokenizer.build_inputs_with_special_tokens(A )
__snake_case: int = tokenizer.build_inputs_with_special_tokens(A , A )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def UpperCAmelCase__ ( self : Optional[int] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__snake_case: int = self.rust_tokenizer_class.from_pretrained(A , **A )
__snake_case: Dict = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'''
__snake_case: Dict = tokenizer_r.encode_plus(
A , return_attention_mask=A , return_token_type_ids=A , return_offsets_mapping=A , add_special_tokens=A , )
__snake_case: str = tokenizer_r.do_lower_case if hasattr(A , """do_lower_case""" ) else False
__snake_case: Dict = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: List[str] = ["""的""", """人""", """有"""]
__snake_case: int = """""".join(A )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__snake_case: Union[str, Any] = True
__snake_case: str = self.tokenizer_class.from_pretrained(A , **A )
__snake_case: Dict = self.rust_tokenizer_class.from_pretrained(A , **A )
__snake_case: int = tokenizer_p.encode(A , add_special_tokens=A )
__snake_case: Optional[int] = tokenizer_r.encode(A , add_special_tokens=A )
__snake_case: Optional[Any] = tokenizer_r.convert_ids_to_tokens(A )
__snake_case: Any = tokenizer_p.convert_ids_to_tokens(A )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A , A )
self.assertListEqual(A , A )
__snake_case: List[str] = False
__snake_case: Tuple = self.rust_tokenizer_class.from_pretrained(A , **A )
__snake_case: List[Any] = self.tokenizer_class.from_pretrained(A , **A )
__snake_case: Dict = tokenizer_r.encode(A , add_special_tokens=A )
__snake_case: str = tokenizer_p.encode(A , add_special_tokens=A )
__snake_case: Tuple = tokenizer_r.convert_ids_to_tokens(A )
__snake_case: List[Any] = tokenizer_p.convert_ids_to_tokens(A )
# it is expected that only the first Chinese character is not preceded by "##".
__snake_case: str = [
f'''##{token}''' if idx != 0 else token for idx, token in enumerate(A )
]
self.assertListEqual(A , A )
self.assertListEqual(A , A )
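# Hedged illustration (added, not part of the original tests): the WordpieceTokenizer
# behaviour asserted above ("unwanted" -> ["un", "##want", "##ed"]) comes from greedy
# longest-match-first lookup against the vocabulary.  A minimal standalone sketch:
def _greedy_wordpiece(word: str, vocab: set, unk: str = "[UNK]") -> list:
    pieces, start = [], 0
    while start < len(word):
        end, current = len(word), None
        while start < end:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                current = piece
                break
            end -= 1
        if current is None:
            # No sub-token matches, so the whole word maps to the unknown token.
            return [unk]
        pieces.append(current)
        start = end
    return pieces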
| 717 |
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=5) -> Optional[int]:
# Adapted from https://github.com/pytorch/fairseq/blob/master/fairseq/models/roberta/hub_interface.py
assert masked_input.count("""<mask>""") == 1
__snake_case: Dict = torch.tensor(tokenizer.encode(SCREAMING_SNAKE_CASE__ , add_special_tokens=SCREAMING_SNAKE_CASE__)).unsqueeze(0) # Batch size 1
__snake_case: Optional[Any] = model(SCREAMING_SNAKE_CASE__)[0] # The last hidden-state is the first element of the output tuple
__snake_case: Tuple = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
__snake_case: List[Any] = logits[0, masked_index, :]
__snake_case: Optional[int] = logits.softmax(dim=0)
__snake_case , __snake_case: Optional[int] = prob.topk(k=SCREAMING_SNAKE_CASE__ , dim=0)
__snake_case: List[str] = """ """.join(
[tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(SCREAMING_SNAKE_CASE__))])
__snake_case: List[Any] = tokenizer.mask_token
__snake_case: Union[str, Any] = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(""" """)):
__snake_case: Any = predicted_token_bpe.replace("""\u2581""" , """ """)
if " {0}".format(SCREAMING_SNAKE_CASE__) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(""" {0}""".format(SCREAMING_SNAKE_CASE__) , SCREAMING_SNAKE_CASE__),
values[index].item(),
predicted_token,
))
else:
topk_filled_outputs.append(
(
masked_input.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__),
values[index].item(),
predicted_token,
))
return topk_filled_outputs
__UpperCAmelCase : Union[str, Any] = CamembertTokenizer.from_pretrained("camembert-base")
__UpperCAmelCase : int = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()
__UpperCAmelCase : List[Any] = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
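# Hedged note (added, not part of the original script): an equivalent top-k mask
# filling can be obtained with the built-in fill-mask pipeline; the sketch below is
# illustrative only.
def _fill_mask_with_pipeline(masked_input: str, topk: int = 3):
    from transformers import pipeline

    fill = pipeline("fill-mask", model="camembert-base")
    # Each result dict contains "sequence", "score", "token" and "token_str".
    return fill(masked_input, top_k=topk)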
| 155 | 0 |
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self , __UpperCAmelCase , __UpperCAmelCase=1_3 , __UpperCAmelCase=3_2 , __UpperCAmelCase=2 , __UpperCAmelCase=3 , __UpperCAmelCase=1_6 , __UpperCAmelCase=[1, 2, 1] , __UpperCAmelCase=[2, 2, 4] , __UpperCAmelCase=2 , __UpperCAmelCase=2.0 , __UpperCAmelCase=True , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.0 , __UpperCAmelCase=0.1 , __UpperCAmelCase="gelu" , __UpperCAmelCase=False , __UpperCAmelCase=True , __UpperCAmelCase=0.02 , __UpperCAmelCase=1E-5 , __UpperCAmelCase=True , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=1_0 , __UpperCAmelCase=8 , __UpperCAmelCase=["stage1", "stage2", "stage3"] , __UpperCAmelCase=[1, 2, 3] , ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = parent
lowerCAmelCase__ :str = batch_size
lowerCAmelCase__ :List[str] = image_size
lowerCAmelCase__ :Optional[int] = patch_size
lowerCAmelCase__ :List[str] = num_channels
lowerCAmelCase__ :Tuple = embed_dim
lowerCAmelCase__ :Optional[int] = depths
lowerCAmelCase__ :Optional[Any] = num_heads
lowerCAmelCase__ :List[str] = window_size
lowerCAmelCase__ :int = mlp_ratio
lowerCAmelCase__ :Union[str, Any] = qkv_bias
lowerCAmelCase__ :Any = hidden_dropout_prob
lowerCAmelCase__ :str = attention_probs_dropout_prob
lowerCAmelCase__ :Union[str, Any] = drop_path_rate
lowerCAmelCase__ :List[Any] = hidden_act
lowerCAmelCase__ :str = use_absolute_embeddings
lowerCAmelCase__ :int = patch_norm
lowerCAmelCase__ :Tuple = layer_norm_eps
lowerCAmelCase__ :Optional[int] = initializer_range
lowerCAmelCase__ :List[Any] = is_training
lowerCAmelCase__ :Optional[Any] = scope
lowerCAmelCase__ :int = use_labels
lowerCAmelCase__ :str = type_sequence_label_size
lowerCAmelCase__ :List[str] = encoder_stride
lowerCAmelCase__ :Tuple = out_features
lowerCAmelCase__ :Tuple = out_indices
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ :Any = None
if self.use_labels:
lowerCAmelCase__ :Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ :Any = self.get_config()
return config, pixel_values, labels
def snake_case ( self ):
'''simple docstring'''
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :Tuple = MaskFormerSwinModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :int = model(__UpperCAmelCase )
lowerCAmelCase__ :List[str] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCAmelCase__ :Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = MaskFormerSwinBackbone(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCAmelCase__ :int = model(__UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] )
# verify ValueError
with self.parent.assertRaises(__UpperCAmelCase ):
lowerCAmelCase__ :List[str] = ['stem']
lowerCAmelCase__ :Dict = MaskFormerSwinBackbone(config=__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :List[str] = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = config_and_inputs
lowerCAmelCase__ :Any = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class _lowerCAmelCase ( a , a , unittest.TestCase ):
"""simple docstring"""
__magic_name__ :Dict = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
__magic_name__ :List[Any] = {"""feature-extraction""": MaskFormerSwinModel} if is_torch_available() else {}
__magic_name__ :List[str] = False
__magic_name__ :Optional[Any] = False
__magic_name__ :Union[str, Any] = False
__magic_name__ :Optional[Any] = False
__magic_name__ :str = False
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = MaskFormerSwinModelTester(self )
lowerCAmelCase__ :Any = ConfigTester(self , config_class=__UpperCAmelCase , embed_dim=3_7 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def snake_case ( self ):
'''simple docstring'''
pass
def snake_case ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def snake_case ( self ):
'''simple docstring'''
return
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__UpperCAmelCase )
@unittest.skip('Swin does not use inputs_embeds' )
def snake_case ( self ):
'''simple docstring'''
pass
@unittest.skip('Swin does not support feedforward chunking' )
def snake_case ( self ):
'''simple docstring'''
pass
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ :str = model_class(__UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase__ :Optional[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCAmelCase , nn.Linear ) )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ :Union[str, Any] = model_class(__UpperCAmelCase )
lowerCAmelCase__ :List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ :Union[str, Any] = [*signature.parameters.keys()]
lowerCAmelCase__ :Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , __UpperCAmelCase )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def snake_case ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def snake_case ( self ):
'''simple docstring'''
pass
def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
lowerCAmelCase__ :str = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase__ :Dict = model(**self._prepare_for_class(__UpperCAmelCase , __UpperCAmelCase ) )
lowerCAmelCase__ :Tuple = outputs.hidden_states
lowerCAmelCase__ :Any = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__UpperCAmelCase ) , __UpperCAmelCase )
# Swin has a different seq_length
lowerCAmelCase__ :Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase__ :Any = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :str = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ :List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowerCAmelCase__ :Dict = True
self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ :Dict = True
self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :Any = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ :Any = 3
lowerCAmelCase__ :List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCAmelCase__ :str = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase__ :Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCAmelCase__ :List[str] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowerCAmelCase__ :Tuple = True
self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , (padded_height, padded_width) )
# check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ :str = True
self.check_hidden_states_output(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def snake_case ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def snake_case ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def snake_case ( self ):
'''simple docstring'''
pass
def snake_case ( self ):
'''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def set_nan_tensor_to_zero(t):
            t[t != t] = 0
            return t

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            with torch.no_grad():
                tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
                dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

                def recursive_check(tuple_object, dict_object):
                    if isinstance(tuple_object, (List, Tuple)):
                        for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif isinstance(tuple_object, Dict):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values(), dict_object.values()
                        ):
                            recursive_check(tuple_iterable_value, dict_iterable_value)
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(tuple_object), set_nan_tensor_to_zero(dict_object), atol=1e-5
                            ),
                            msg=(
                                "Tuple and dict output are not equal. Difference:"
                                f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
                                f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
                                f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
                            ),
                        )

                recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@require_torch
class _lowerCAmelCase ( unittest.TestCase , a ):
"""simple docstring"""
__magic_name__ :Dict = (MaskFormerSwinBackbone,) if is_torch_available() else ()
__magic_name__ :str = MaskFormerSwinConfig
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ :str = MaskFormerSwinModelTester(self )
def snake_case ( self ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ :Optional[Any] = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
lowerCAmelCase__ :Optional[int] = backbone_class(__UpperCAmelCase )
backbone.to(__UpperCAmelCase )
backbone.eval()
lowerCAmelCase__ :List[str] = backbone(**__UpperCAmelCase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , __UpperCAmelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
lowerCAmelCase__ :Any = backbone(**__UpperCAmelCase , output_hidden_states=__UpperCAmelCase )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ :Tuple = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
lowerCAmelCase__ :Tuple = backbone(**__UpperCAmelCase , output_attentions=__UpperCAmelCase )
self.assertIsNotNone(outputs.attentions )
| 93 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __UpperCAmelCase ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
_snake_case : Dict = KandinskyInpaintPipeline
_snake_case : int = ['prompt', 'image_embeds', 'negative_image_embeds', 'image', 'mask_image']
_snake_case : str = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
'mask_image',
]
_snake_case : Optional[int] = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
_snake_case : Optional[Any] = False
@property
def A ( self : int )-> Tuple:
return 32
@property
def A ( self : int )-> List[Any]:
return 32
@property
def A ( self : Dict )-> Tuple:
return self.time_input_dim
@property
def A ( self : Union[str, Any] )-> Tuple:
return self.time_input_dim * 4
@property
def A ( self : Dict )-> str:
return 1_00
@property
def A ( self : int )-> Dict:
__UpperCamelCase = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def A ( self : Tuple )-> Optional[Any]:
torch.manual_seed(0 )
__UpperCamelCase = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
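        # a deliberately tiny multilingual CLIP text encoder so this pipeline test runs quickly on CPU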
__UpperCamelCase = MultilingualCLIP(A_ )
__UpperCamelCase = text_encoder.eval()
return text_encoder
@property
def A ( self : int )-> str:
torch.manual_seed(0 )
__UpperCamelCase = {
"in_channels": 9,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
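        # dummy UNet config: the 9 input channels cover the latents, the masked-image latents and the mask used for inpainting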
__UpperCamelCase = UNetaDConditionModel(**A_ )
return model
@property
def A ( self : Optional[int] )-> Union[str, Any]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def A ( self : List[str] )-> Tuple:
torch.manual_seed(0 )
__UpperCamelCase = VQModel(**self.dummy_movq_kwargs )
return model
def A ( self : str )-> List[Any]:
__UpperCamelCase = self.dummy_text_encoder
__UpperCamelCase = self.dummy_tokenizer
__UpperCamelCase = self.dummy_unet
__UpperCamelCase = self.dummy_movq
__UpperCamelCase = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="linear" , beta_start=0.00_085 , beta_end=0.012 , clip_sample=A_ , set_alpha_to_one=A_ , steps_offset=1 , prediction_type="epsilon" , thresholding=A_ , )
__UpperCamelCase = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def A ( self : Union[str, Any] , A_ : Optional[Any] , A_ : Optional[Any]=0 )-> Dict:
__UpperCamelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(A_ ) ).to(A_ )
__UpperCamelCase = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(A_ )
# create init_image
__UpperCamelCase = floats_tensor((1, 3, 64, 64) , rng=random.Random(A_ ) ).to(A_ )
__UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__UpperCamelCase = Image.fromarray(np.uinta(A_ ) ).convert("RGB" ).resize((2_56, 2_56) )
# create mask
__UpperCamelCase = np.ones((64, 64) , dtype=np.floataa )
__UpperCamelCase = 0
if str(A_ ).startswith("mps" ):
__UpperCamelCase = torch.manual_seed(A_ )
else:
__UpperCamelCase = torch.Generator(device=A_ ).manual_seed(A_ )
__UpperCamelCase = {
"prompt": "horse",
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
def A ( self : Optional[int] )-> Dict:
__UpperCamelCase = "cpu"
__UpperCamelCase = self.get_dummy_components()
__UpperCamelCase = self.pipeline_class(**A_ )
__UpperCamelCase = pipe.to(A_ )
pipe.set_progress_bar_config(disable=A_ )
__UpperCamelCase = pipe(**self.get_dummy_inputs(A_ ) )
__UpperCamelCase = output.images
__UpperCamelCase = pipe(
**self.get_dummy_inputs(A_ ) , return_dict=A_ , )[0]
__UpperCamelCase = image[0, -3:, -3:, -1]
__UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
print(f"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase = np.array(
[0.8_326_919, 0.73_790_467, 0.20_918_581, 0.9_309_612, 0.5_511_791, 0.43_713_328, 0.5_513_321, 0.49_922_934, 0.59_497_786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def A ( self : Union[str, Any] )-> int:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def A ( self : str )-> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Any )-> str:
__UpperCamelCase = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
__UpperCamelCase = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
__UpperCamelCase = np.ones((7_68, 7_68) , dtype=np.floataa )
__UpperCamelCase = 0
__UpperCamelCase = "a hat"
__UpperCamelCase = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(A_ )
__UpperCamelCase = KandinskyInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa )
__UpperCamelCase = pipeline.to(A_ )
pipeline.set_progress_bar_config(disable=A_ )
__UpperCamelCase = torch.Generator(device="cpu" ).manual_seed(0 )
__UpperCamelCase , __UpperCamelCase = pipe_prior(
A_ , generator=A_ , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
__UpperCamelCase = pipeline(
A_ , image=A_ , mask_image=A_ , image_embeds=A_ , negative_image_embeds=A_ , generator=A_ , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="np" , )
__UpperCamelCase = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(A_ , A_ ) | 505 | 0 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    """Compute WER/CER for the decoded predictions and write the results to disk."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """Lower-case the text and strip the characters that were ignored during training."""
    chars_to_ignore_regex = "[,?.!\-\;\:\"“%‘”�—’…–]"  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers'
)
parser.add_argument(
'--dataset',
type=str,
required=True,
help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets',
)
parser.add_argument(
'--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice'
)
parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`')
parser.add_argument(
'--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.'
)
parser.add_argument(
'--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.'
)
parser.add_argument(
'--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.'
)
parser.add_argument(
'--device',
type=int,
default=None,
help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.',
)
    args = parser.parse_args()
main(args) | 240 |
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def notebook_launcher(function, args=(), num_processes=None, mixed_precision="no", use_port="29500"):
    in_colab = False
    in_kaggle = False
    if any(key.startswith("KAGGLE") for key in os.environ.keys()):
        in_kaggle = True
    elif "IPython" in sys.modules:
        in_colab = "google.colab" in str(sys.modules["IPython"].get_ipython())

    try:
        mixed_precision = PrecisionType(mixed_precision.lower())
    except ValueError:
        raise ValueError(
            f"Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}."
        )

    if (in_colab or in_kaggle) and (os.environ.get("TPU_NAME", None) is not None):
        # TPU launch
        import torch_xla.distributed.xla_multiprocessing as xmp

        if len(AcceleratorState._shared_state) > 0:
            raise ValueError(
                "To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside "
                "your training function. Restart your notebook and make sure no cells initializes an "
                "`Accelerator`."
            )
        if num_processes is None:
            num_processes = 8

        launcher = PrepareForLaunch(function, distributed_type="TPU")
        print(f"Launching a training on {num_processes} TPU cores.")
        xmp.spawn(launcher, args=args, nprocs=num_processes, start_method="fork")
    elif in_colab:
        # No need for a distributed launch otherwise as it's either CPU or one GPU.
        if torch.cuda.is_available():
            print("Launching training on one GPU.")
        else:
            print("Launching training on one CPU.")
        function(*args)
    else:
        if num_processes is None:
            raise ValueError(
                "You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call."
            )
        if num_processes > 1:
            # Multi-GPU launch
            from torch.multiprocessing import start_processes
            from torch.multiprocessing.spawn import ProcessRaisedException

            if len(AcceleratorState._shared_state) > 0:
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized "
                    "inside your training function. Restart your notebook and make sure no cells initializes an "
                    "`Accelerator`."
                )
            if torch.cuda.is_initialized():
                raise ValueError(
                    "To launch a multi-GPU training from your notebook, you need to avoid running any instruction "
                    "using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA "
                    "function."
                )
            # torch.distributed will expect a few environment variable to be here. We set the ones common to each
            # process here (the other ones will be set be the launcher).
            with patch_environment(
                world_size=num_processes, master_addr="127.0.01", master_port=use_port, mixed_precision=mixed_precision
            ):
                launcher = PrepareForLaunch(function, distributed_type="MULTI_GPU")
                print(f"Launching training on {num_processes} GPUs.")
                try:
                    start_processes(launcher, args=args, nprocs=num_processes, start_method="fork")
                except ProcessRaisedException as e:
                    if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
                        raise RuntimeError(
                            "CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. "
                            "This likely stems from an outside import causing issues once the `notebook_launcher()` is called. "
                            "Please review your imports and test them when running the `notebook_launcher()` to identify "
                            "which one is problematic."
                        ) from e
        else:
            # No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
            if is_mps_available():
                os.environ["PYTORCH_ENABLE_MPS_FALLBACK"] = "1"
                print("Launching training on MPS.")
            elif torch.cuda.is_available():
                print("Launching training on one GPU.")
            else:
                print("Launching training on CPU.")
            function(*args)
def debug_launcher(function, args=(), num_processes=2):
    from torch.multiprocessing import start_processes

    with tempfile.NamedTemporaryFile() as tmp_file:
        # torch.distributed will expect a few environment variable to be here. We set the ones common to each
        # process here (the other ones will be set be the launcher).
        with patch_environment(
            world_size=num_processes, master_addr="127.0.01", master_port="29500", accelerate_mixed_precision="no", accelerate_debug_rdv_file=tmp_file.name, accelerate_use_cpu="yes", ):
            launcher = PrepareForLaunch(function, debug=True)
            start_processes(launcher, args=args, nprocs=num_processes, start_method="fork") | 240 | 1 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
__A : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def __init__( self : Tuple , __lowerCamelCase : WhisperForConditionalGeneration , __lowerCamelCase : WhisperProcessor , __lowerCamelCase : AutoencoderKL , __lowerCamelCase : CLIPTextModel , __lowerCamelCase : CLIPTokenizer , __lowerCamelCase : UNetaDConditionModel , __lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __lowerCamelCase : StableDiffusionSafetyChecker , __lowerCamelCase : CLIPImageProcessor , ):
super().__init__()
if safety_checker is None:
logger.warning(
f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
" results in services or applications open to the public. Both the diffusers team and Hugging Face"
" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
" it only for use-cases that involve analyzing network behavior or auditing its results. For more"
" information, please have a look at https://github.com/huggingface/diffusers/pull/254 ." )
self.register_modules(
speech_model=__lowerCamelCase , speech_processor=__lowerCamelCase , vae=__lowerCamelCase , text_encoder=__lowerCamelCase , tokenizer=__lowerCamelCase , unet=__lowerCamelCase , scheduler=__lowerCamelCase , feature_extractor=__lowerCamelCase , )
def _snake_case ( self : Tuple , __lowerCamelCase : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
SCREAMING_SNAKE_CASE = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__lowerCamelCase )
def _snake_case ( self : Any ):
self.enable_attention_slicing(__lowerCamelCase )
@torch.no_grad()
def __call__( self : Optional[int] , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[str]=16000 , __lowerCamelCase : int = 512 , __lowerCamelCase : int = 512 , __lowerCamelCase : int = 50 , __lowerCamelCase : float = 7.5 , __lowerCamelCase : Optional[Union[str, List[str]]] = None , __lowerCamelCase : Optional[int] = 1 , __lowerCamelCase : float = 0.0 , __lowerCamelCase : Optional[torch.Generator] = None , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : Optional[str] = "pil" , __lowerCamelCase : bool = True , __lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowerCamelCase : int = 1 , **__lowerCamelCase : Dict , ):
SCREAMING_SNAKE_CASE = self.speech_processor.feature_extractor(
__lowerCamelCase , return_tensors="pt" , sampling_rate=__lowerCamelCase ).input_features.to(self.device )
SCREAMING_SNAKE_CASE = self.speech_model.generate(__lowerCamelCase , max_length=480000 )
SCREAMING_SNAKE_CASE = self.speech_processor.tokenizer.batch_decode(__lowerCamelCase , skip_special_tokens=__lowerCamelCase , normalize=__lowerCamelCase )[
0
]
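        # the Whisper transcription of the input audio is used as the text prompt for the diffusion model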
if isinstance(__lowerCamelCase , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = 1
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = len(__lowerCamelCase )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(__lowerCamelCase )}" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}." )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__lowerCamelCase , __lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
f" {type(__lowerCamelCase )}." )
# get prompt text embeddings
SCREAMING_SNAKE_CASE = self.tokenizer(
__lowerCamelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
SCREAMING_SNAKE_CASE = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
SCREAMING_SNAKE_CASE = text_input_ids[:, : self.tokenizer.model_max_length]
SCREAMING_SNAKE_CASE = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = text_embeddings.shape
SCREAMING_SNAKE_CASE = text_embeddings.repeat(1 , __lowerCamelCase , 1 )
SCREAMING_SNAKE_CASE = text_embeddings.view(bs_embed * num_images_per_prompt , __lowerCamelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
SCREAMING_SNAKE_CASE = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE = 42
if negative_prompt is None:
SCREAMING_SNAKE_CASE = [""] * batch_size
elif type(__lowerCamelCase ) is not type(__lowerCamelCase ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(__lowerCamelCase )} !="
f" {type(__lowerCamelCase )}." )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
SCREAMING_SNAKE_CASE = [negative_prompt]
elif batch_size != len(__lowerCamelCase ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(__lowerCamelCase )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`." )
else:
SCREAMING_SNAKE_CASE = negative_prompt
SCREAMING_SNAKE_CASE = text_input_ids.shape[-1]
SCREAMING_SNAKE_CASE = self.tokenizer(
__lowerCamelCase , padding="max_length" , max_length=__lowerCamelCase , truncation=__lowerCamelCase , return_tensors="pt" , )
SCREAMING_SNAKE_CASE = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
SCREAMING_SNAKE_CASE = uncond_embeddings.shape[1]
SCREAMING_SNAKE_CASE = uncond_embeddings.repeat(1 , __lowerCamelCase , 1 )
SCREAMING_SNAKE_CASE = uncond_embeddings.view(batch_size * num_images_per_prompt , __lowerCamelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
SCREAMING_SNAKE_CASE = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
SCREAMING_SNAKE_CASE = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
SCREAMING_SNAKE_CASE = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
SCREAMING_SNAKE_CASE = torch.randn(__lowerCamelCase , generator=__lowerCamelCase , device="cpu" , dtype=__lowerCamelCase ).to(
self.device )
else:
SCREAMING_SNAKE_CASE = torch.randn(__lowerCamelCase , generator=__lowerCamelCase , device=self.device , dtype=__lowerCamelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
SCREAMING_SNAKE_CASE = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__lowerCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
SCREAMING_SNAKE_CASE = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
SCREAMING_SNAKE_CASE = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
SCREAMING_SNAKE_CASE = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
SCREAMING_SNAKE_CASE = {}
if accepts_eta:
SCREAMING_SNAKE_CASE = eta
for i, t in enumerate(self.progress_bar(__lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
SCREAMING_SNAKE_CASE = self.scheduler.scale_model_input(__lowerCamelCase , __lowerCamelCase )
# predict the noise residual
SCREAMING_SNAKE_CASE = self.unet(__lowerCamelCase , __lowerCamelCase , encoder_hidden_states=__lowerCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = noise_pred.chunk(2 )
SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
SCREAMING_SNAKE_CASE = self.scheduler.step(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , **__lowerCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
SCREAMING_SNAKE_CASE = 1 / 0.18_215 * latents
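        # 0.18215 is the Stable Diffusion VAE scaling factor; it is undone before decoding the latents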
SCREAMING_SNAKE_CASE = self.vae.decode(__lowerCamelCase ).sample
SCREAMING_SNAKE_CASE = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
SCREAMING_SNAKE_CASE = self.numpy_to_pil(__lowerCamelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__lowerCamelCase , nsfw_content_detected=__lowerCamelCase ) | 16 |
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class EfficientFormerConfig(PretrainedConfig):
    r"""Configuration class to store the configuration of an EfficientFormer model."""

    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
| 573 | 0 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    # zigzag (spiral) level-order traversal: alternate the direction on every level
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 720 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
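# The structure below is registered with _LazyModule, so heavy torch/TF submodules are only imported when first accessed.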
lowerCAmelCase__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 648 | 0 |
"""simple docstring"""
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the decimal part of ``number``, rounded to ``digit_amount`` digits when positive."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.3_45, 1))
print(decimal_isolate(35.3_45, 2))
print(decimal_isolate(35.3_45, 3))
print(decimal_isolate(-14.7_89, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.1_23, 1))
print(decimal_isolate(-14.1_23, 2))
print(decimal_isolate(-14.1_23, 3)) | 545 | import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE ( UpperCamelCase):
"""simple docstring"""
__UpperCAmelCase = (DDIMParallelScheduler,)
__UpperCAmelCase = (("eta", 0.0), ("num_inference_steps", 5_0))
def lowercase_ ( self , **_UpperCAmelCase ):
__snake_case : List[str] = {
'num_train_timesteps': 1_000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**_UpperCAmelCase )
return config
def lowercase_ ( self , **_UpperCAmelCase ):
__snake_case : int = self.scheduler_classes[0]
__snake_case : int = self.get_scheduler_config(**_UpperCAmelCase )
__snake_case : Any = scheduler_class(**_UpperCAmelCase )
__snake_case , __snake_case : List[str] = 10, 0.0
__snake_case : str = self.dummy_model()
__snake_case : Union[str, Any] = self.dummy_sample_deter
scheduler.set_timesteps(_UpperCAmelCase )
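        # run the whole denoising loop so callers can check the final sample numerically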
for t in scheduler.timesteps:
__snake_case : Tuple = model(_UpperCAmelCase , _UpperCAmelCase )
__snake_case : List[str] = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).prev_sample
return sample
def lowercase_ ( self ):
for timesteps in [100, 500, 1_000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase )
def lowercase_ ( self ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_UpperCAmelCase )
__snake_case : str = self.scheduler_classes[0]
__snake_case : Union[str, Any] = self.get_scheduler_config(steps_offset=1 )
__snake_case : Optional[Any] = scheduler_class(**_UpperCAmelCase )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def lowercase_ ( self ):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase )
def lowercase_ ( self ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_UpperCAmelCase )
def lowercase_ ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCAmelCase )
def lowercase_ ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_UpperCAmelCase )
def lowercase_ ( self ):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=_UpperCAmelCase )
def lowercase_ ( self ):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=_UpperCAmelCase )
def lowercase_ ( self ):
self.check_over_configs(thresholding=_UpperCAmelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=_UpperCAmelCase , prediction_type=_UpperCAmelCase , sample_max_value=_UpperCAmelCase , )
def lowercase_ ( self ):
for t in [1, 10, 49]:
self.check_over_forward(time_step=_UpperCAmelCase )
def lowercase_ ( self ):
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=_UpperCAmelCase , num_inference_steps=_UpperCAmelCase )
def lowercase_ ( self ):
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=_UpperCAmelCase , eta=_UpperCAmelCase )
def lowercase_ ( self ):
__snake_case : List[Any] = self.scheduler_classes[0]
__snake_case : List[str] = self.get_scheduler_config()
__snake_case : Optional[int] = scheduler_class(**_UpperCAmelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14771 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32460 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5
def lowercase_ ( self ):
__snake_case : Dict = self.scheduler_classes[0]
__snake_case : Any = self.get_scheduler_config()
__snake_case : Any = scheduler_class(**_UpperCAmelCase )
__snake_case , __snake_case : List[str] = 10, 0.0
scheduler.set_timesteps(_UpperCAmelCase )
__snake_case : List[Any] = self.dummy_model()
__snake_case : Any = self.dummy_sample_deter
__snake_case : Union[str, Any] = self.dummy_sample_deter + 0.1
__snake_case : Dict = self.dummy_sample_deter - 0.1
__snake_case : Optional[int] = samplea.shape[0]
__snake_case : str = torch.stack([samplea, samplea, samplea] , dim=0 )
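        # stack three distinct samples so the parallel (batched) step is exercised on a real batch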
__snake_case : List[str] = torch.arange(_UpperCAmelCase )[0:3, None].repeat(1 , _UpperCAmelCase )
__snake_case : str = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
__snake_case : Dict = scheduler.batch_step_no_noise(_UpperCAmelCase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , _UpperCAmelCase )
__snake_case : List[str] = torch.sum(torch.abs(_UpperCAmelCase ) )
__snake_case : List[str] = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 1147.7904 ) < 1E-2
assert abs(result_mean.item() - 0.4982 ) < 1E-3
def lowercase_ ( self ):
__snake_case : Optional[int] = self.full_loop()
__snake_case : str = torch.sum(torch.abs(_UpperCAmelCase ) )
__snake_case : Tuple = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 172.0067 ) < 1E-2
assert abs(result_mean.item() - 0.223967 ) < 1E-3
def lowercase_ ( self ):
__snake_case : Any = self.full_loop(prediction_type='v_prediction' )
__snake_case : Any = torch.sum(torch.abs(_UpperCAmelCase ) )
__snake_case : str = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 52.5302 ) < 1E-2
assert abs(result_mean.item() - 0.0684 ) < 1E-3
def lowercase_ ( self ):
# We specify different beta, so that the first alpha is 0.99
__snake_case : int = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 )
__snake_case : Dict = torch.sum(torch.abs(_UpperCAmelCase ) )
__snake_case : int = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 149.8295 ) < 1E-2
assert abs(result_mean.item() - 0.1951 ) < 1E-3
def lowercase_ ( self ):
# We specify different beta, so that the first alpha is 0.99
__snake_case : Optional[int] = self.full_loop(set_alpha_to_one=_UpperCAmelCase , beta_start=0.01 )
__snake_case : Dict = torch.sum(torch.abs(_UpperCAmelCase ) )
__snake_case : List[str] = torch.mean(torch.abs(_UpperCAmelCase ) )
assert abs(result_sum.item() - 149.0784 ) < 1E-2
assert abs(result_mean.item() - 0.1941 ) < 1E-3
| 576 | 0 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Helper function parsing the command line options."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
| 704 |
def print_max_activities(start: list[int], finish: list[int]) -> None:
    """Greedy activity selection: print a maximum-size set of mutually compatible activities."""
    n = len(finish)
    print("The following activities are selected:")
    # The first activity is always selected
    i = 0
    print(i, end=",")
    # Consider rest of the activities
    for j in range(n):
        # If this activity has start time greater than
        # or equal to the finish time of previously
        # selected activity, then select it
        if start[j] >= finish[i]:
            print(j, end=",")
            i = j
if __name__ == "__main__":
import doctest
doctest.testmod()
    start = [1, 3, 0, 5, 8, 5]
    finish = [2, 4, 6, 7, 9, 9]
    print_max_activities(start, finish)
| 651 | 0 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=99 , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=4 , ):
'''simple docstring'''
UpperCamelCase__ = parent
UpperCamelCase__ = batch_size
UpperCamelCase__ = seq_length
UpperCamelCase__ = is_training
UpperCamelCase__ = use_attention_mask
UpperCamelCase__ = use_token_type_ids
UpperCamelCase__ = use_labels
UpperCamelCase__ = vocab_size
UpperCamelCase__ = hidden_size
UpperCamelCase__ = num_hidden_layers
UpperCamelCase__ = num_attention_heads
UpperCamelCase__ = intermediate_size
UpperCamelCase__ = hidden_act
UpperCamelCase__ = hidden_dropout_prob
UpperCamelCase__ = attention_probs_dropout_prob
UpperCamelCase__ = max_position_embeddings
UpperCamelCase__ = type_vocab_size
UpperCamelCase__ = type_sequence_label_size
UpperCamelCase__ = initializer_range
UpperCamelCase__ = num_choices
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase__ = None
if self.use_attention_mask:
UpperCamelCase__ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCamelCase__ = None
if self.use_token_type_ids:
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
UpperCamelCase__ = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.prepare_config_and_inputs()
UpperCamelCase__, UpperCamelCase__, UpperCamelCase__, UpperCamelCase__ = config_and_inputs
UpperCamelCase__ = True
UpperCamelCase__ = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
UpperCamelCase__ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
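        # encoder hidden states and mask let the decoder configuration exercise the cross-attention path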
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class lowerCamelCase__ ( UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
_UpperCamelCase : Any = True
_UpperCamelCase : Tuple = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = FlaxBertModelTester(self )
@slow
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = FlaxBertModel.from_pretrained("bert-base-cased" )
UpperCamelCase__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(snake_case )
| 551 |
def topological_sort(graph):
    """Kahn's algorithm: repeatedly remove zero in-degree vertices to build a topological order."""
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0

    for values in graph.values():
        for i in values:
            indegree[i] += 1

    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)

    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)

    if cnt != len(graph):
        print("Cycle exists")
    else:
        print(topo)


# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 551 | 1 |
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"  # silence TensorFlow's C++ logging before it is imported below
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None)
| 504 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, decoder_input_ids=None, attention_mask=None):
    if attention_mask is None:
        # mask out padding positions based on the config's pad token
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class _lowercase :
'''simple docstring'''
lowercase__ = OPTConfig
lowercase__ = {}
lowercase__ = """gelu"""
def __init__( self , snake_case__ , snake_case__=13 , snake_case__=7 , snake_case__=True , snake_case__=False , snake_case__=99 , snake_case__=16 , snake_case__=2 , snake_case__=4 , snake_case__=4 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=20 , snake_case__=2 , snake_case__=1 , snake_case__=0 , snake_case__=16 , snake_case__=16 , ):
'''simple docstring'''
UpperCamelCase_ = parent
UpperCamelCase_ = batch_size
UpperCamelCase_ = seq_length
UpperCamelCase_ = is_training
UpperCamelCase_ = use_labels
UpperCamelCase_ = vocab_size
UpperCamelCase_ = hidden_size
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = hidden_dropout_prob
UpperCamelCase_ = attention_probs_dropout_prob
UpperCamelCase_ = max_position_embeddings
UpperCamelCase_ = eos_token_id
UpperCamelCase_ = pad_token_id
UpperCamelCase_ = bos_token_id
UpperCamelCase_ = embed_dim
UpperCamelCase_ = word_embed_proj_dim
UpperCamelCase_ = False
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCamelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCamelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 )
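        # force every sequence to end with the EOS token before building the config and inputs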
UpperCamelCase_ = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=snake_case__ , **self.config_updates , )
UpperCamelCase_ = prepare_opt_inputs_dict(snake_case__ , snake_case__ )
return config, inputs_dict
def _lowerCamelCase ( self , snake_case__ , snake_case__ ):
'''simple docstring'''
UpperCamelCase_ = TFOPTModel(config=snake_case__ )
UpperCamelCase_ = inputs_dict["input_ids"]
UpperCamelCase_ = input_ids[:1, :]
UpperCamelCase_ = inputs_dict["attention_mask"][:1, :]
UpperCamelCase_ = 1
# first forward pass
UpperCamelCase_ = model(snake_case__ , attention_mask=snake_case__ , use_cache=snake_case__ )
UpperCamelCase_ , UpperCamelCase_ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCamelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCamelCase_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
        # append to next input_ids and attention_mask
UpperCamelCase_ = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCamelCase_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCamelCase_ = model(snake_case__ , attention_mask=snake_case__ )[0]
UpperCamelCase_ = model(snake_case__ , attention_mask=snake_case__ , past_key_values=snake_case__ )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCamelCase_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCamelCase_ = output_from_no_past[:, -3:, random_slice_idx]
UpperCamelCase_ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(snake_case__ , snake_case__ , rtol=1e-3 )
@require_tf
class _lowercase (a_ , a_ , unittest.TestCase ):
'''simple docstring'''
lowercase__ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
lowercase__ = (TFOPTForCausalLM,) if is_tf_available() else ()
lowercase__ = (
{"""feature-extraction""": TFOPTModel, """text-generation""": TFOPTForCausalLM} if is_tf_available() else {}
)
lowercase__ = False
lowercase__ = False
lowercase__ = False
lowercase__ = 10
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = TFOPTModelTester(self )
UpperCamelCase_ = ConfigTester(self , config_class=snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ , UpperCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(snake_case__ , snake_case__ ):
if hasattr(snake_case__ , "weight" ):
return embedding_layer.weight
else:
                # Here we build the word embedding weights if they don't exist yet,
                # then retry fetching the attribute once the model is built.
model.build()
if hasattr(snake_case__ , "weight" ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
UpperCamelCase_ = model_class(config=snake_case__ )
UpperCamelCase_ = _get_word_embedding_weight(snake_case__ , model.get_input_embeddings() )
UpperCamelCase_ = _get_word_embedding_weight(snake_case__ , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(snake_case__ )
UpperCamelCase_ = _get_word_embedding_weight(snake_case__ , model.get_input_embeddings() )
UpperCamelCase_ = _get_word_embedding_weight(snake_case__ , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
UpperCamelCase_ = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , snake_case__ )
# check that weights remain the same after resizing
UpperCamelCase_ = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
UpperCamelCase_ = False
self.assertTrue(snake_case__ )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , snake_case__ )
UpperCamelCase_ = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
UpperCamelCase_ = False
self.assertTrue(snake_case__ )
def _long_tensor(tok_lst):
    return tf.constant(tok_lst , dtype=tf.intaa)
@require_tf
class _lowercase (unittest.TestCase ):
'''simple docstring'''
lowercase__ = 99
def _lowerCamelCase ( self ):
'''simple docstring'''
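        # Build a 4-example batch where every sequence ends in the EOS token (id 2).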
UpperCamelCase_ = tf.ones((4, 1) , dtype=tf.intaa ) * 2
UpperCamelCase_ = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
UpperCamelCase_ = input_ids.shape[0]
UpperCamelCase_ = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class _lowercase (unittest.TestCase ):
'''simple docstring'''
@slow
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = TFOPTModel.from_pretrained("facebook/opt-350m" )
UpperCamelCase_ = _long_tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
UpperCamelCase_ = tf.not_equal(snake_case__ , model.config.pad_token_id )
with tf.GradientTape():
UpperCamelCase_ = model(input_ids=snake_case__ , attention_mask=snake_case__ ).last_hidden_state
UpperCamelCase_ = (1, 11, 512)
self.assertEqual(output.shape , snake_case__ )
UpperCamelCase_ = tf.constant(
[[-0.2_873, -1.9_218, -0.3_033], [-1.2_710, -0.1_338, -0.1_902], [0.4_095, 0.1_214, -1.3_121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , snake_case__ , atol=4e-3 ) )
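        # Re-run the same forward pass under XLA compilation and check that the slice still matches, with a looser tolerance.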
UpperCamelCase_ = tf.function(snake_case__ , jit_compile=snake_case__ )
UpperCamelCase_ = xla_generate(snake_case__ , snake_case__ )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , snake_case__ , atol=4e-2 ) )
@require_tf
@slow
class _lowercase (unittest.TestCase ):
'''simple docstring'''
def _lowerCamelCase ( self ):
'''simple docstring'''
super().setUp()
UpperCamelCase_ = "facebook/opt-350m"
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = TFOPTForCausalLM.from_pretrained(self.path_model )
UpperCamelCase_ = GPTaTokenizer.from_pretrained(self.path_model )
UpperCamelCase_ = [
"Today is a beautiful day and I want to",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
        # verify that the prompt without a BOS token is identical to Metaseq -> add_special_tokens=False
UpperCamelCase_ = tokenizer(snake_case__ , return_tensors="tf" , padding=snake_case__ , add_special_tokens=snake_case__ )
UpperCamelCase_ = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
UpperCamelCase_ = tf.constant(
[
[1.3_851, -13.8_923, -10.5_229, -10.7_533, -0.2_309, -10.2_384, -0.5_365, -9.0_947, -5.1_670],
[-4.7_073, -10.6_276, -3.9_415, -21.5_242, -0.2_822, -0.2_822, -0.2_822, -0.2_822, -0.2_822],
[0.6_247, -3.4_229, -8.9_179, -1.4_297, -14.1_650, 1.4_146, -9.0_218, -0.2_703, -0.2_703],
[6.4_783, -1.9_913, -10.7_926, -2.3_336, 1.5_092, -0.9_974, -6.8_213, 1.3_477, 1.3_477],
] )
self.assertTrue(np.allclose(snake_case__ , snake_case__ , atol=1e-4 ) )
UpperCamelCase_ = tf.function(snake_case__ , jit_compile=snake_case__ )
UpperCamelCase_ = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(snake_case__ , snake_case__ , atol=1e-4 ) )
@require_tf
@slow
class _lowercase (unittest.TestCase ):
'''simple docstring'''
@property
def _lowerCamelCase ( self ):
'''simple docstring'''
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = "facebook/opt-125m"
UpperCamelCase_ = [
"Today is a beautiful day and I want to",
"In the city of New York, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
UpperCamelCase_ = []
UpperCamelCase_ = GPTaTokenizer.from_pretrained(snake_case__ )
UpperCamelCase_ = TFOPTForCausalLM.from_pretrained(snake_case__ )
for prompt in self.prompts:
UpperCamelCase_ = tokenizer(snake_case__ , return_tensors="tf" ).input_ids
UpperCamelCase_ = model.generate(snake_case__ , max_length=10 )
UpperCamelCase_ = tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
predicted_outputs += generated_string
self.assertListEqual(snake_case__ , snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = "facebook/opt-350m"
UpperCamelCase_ = GPTaTokenizer.from_pretrained(snake_case__ )
UpperCamelCase_ = TFOPTForCausalLM.from_pretrained(snake_case__ )
UpperCamelCase_ = "left"
# use different length sentences to test batching
UpperCamelCase_ = [
"Hello, my dog is a little",
"Today, I",
]
UpperCamelCase_ = tokenizer(snake_case__ , return_tensors="tf" , padding=snake_case__ )
UpperCamelCase_ = inputs["input_ids"]
UpperCamelCase_ = model.generate(input_ids=snake_case__ , attention_mask=inputs["attention_mask"] )
UpperCamelCase_ = tokenizer(sentences[0] , return_tensors="tf" ).input_ids
UpperCamelCase_ = model.generate(input_ids=snake_case__ )
UpperCamelCase_ = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs["attention_mask"][-1] , tf.intaa ) )
UpperCamelCase_ = tokenizer(sentences[1] , return_tensors="tf" ).input_ids
UpperCamelCase_ = model.generate(input_ids=snake_case__ , max_length=model.config.max_length - num_paddings )
UpperCamelCase_ = tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
UpperCamelCase_ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=snake_case__ )
UpperCamelCase_ = tokenizer.decode(output_padded[0] , skip_special_tokens=snake_case__ )
UpperCamelCase_ = [
"Hello, my dog is a little bit of a dork.\nI'm a little bit",
"Today, I was in the middle of a conversation with a friend about the",
]
self.assertListEqual(snake_case__ , snake_case__ )
self.assertListEqual(snake_case__ , [non_padded_sentence, padded_sentence] )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = "facebook/opt-350m"
UpperCamelCase_ = [
"Today is a beautiful day and I want to",
"In the city of San Francisco, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
UpperCamelCase_ = []
UpperCamelCase_ = GPTaTokenizer.from_pretrained(snake_case__ )
UpperCamelCase_ = TFOPTForCausalLM.from_pretrained(snake_case__ )
for prompt in self.prompts:
UpperCamelCase_ = tokenizer(snake_case__ , return_tensors="tf" ).input_ids
UpperCamelCase_ = model.generate(snake_case__ , max_length=10 )
UpperCamelCase_ = tokenizer.batch_decode(snake_case__ , skip_special_tokens=snake_case__ )
predicted_outputs += generated_string
self.assertListEqual(snake_case__ , snake_case__ )
| 504 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    """simple docstring"""
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotModelTester:
"""simple docstring"""
def __init__( self : int , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str]=13 , lowerCAmelCase__ : str=7 , lowerCAmelCase__ : Optional[Any]=True , lowerCAmelCase__ : Tuple=False , lowerCAmelCase__ : Tuple=99 , lowerCAmelCase__ : Dict=16 , lowerCAmelCase__ : List[str]=2 , lowerCAmelCase__ : int=4 , lowerCAmelCase__ : Tuple=4 , lowerCAmelCase__ : Dict="gelu" , lowerCAmelCase__ : Optional[Any]=0.1 , lowerCAmelCase__ : Union[str, Any]=0.1 , lowerCAmelCase__ : Dict=32 , lowerCAmelCase__ : str=2 , lowerCAmelCase__ : Tuple=1 , lowerCAmelCase__ : Tuple=0 , lowerCAmelCase__ : List[Any]=0.02 , ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = eos_token_id
_UpperCamelCase = pad_token_id
_UpperCamelCase = bos_token_id
_UpperCamelCase = initializer_range
def snake_case__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size )
_UpperCamelCase = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 )
_UpperCamelCase = shift_tokens_right(lowerCAmelCase__ , 1 , 2 )
_UpperCamelCase = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowerCAmelCase__ , )
_UpperCamelCase = prepare_blenderbot_inputs_dict(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return config, inputs_dict
def snake_case__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def snake_case__ ( self : str , lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tuple ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = 20
_UpperCamelCase = model_class_name(lowerCAmelCase__ )
_UpperCamelCase = model.encode(inputs_dict['''input_ids'''] )
_UpperCamelCase , _UpperCamelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
_UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
_UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , decoder_position_ids=lowerCAmelCase__ , )
_UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
_UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCAmelCase__ , )
_UpperCamelCase = model.decode(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def snake_case__ ( self : int , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = 20
_UpperCamelCase = model_class_name(lowerCAmelCase__ )
_UpperCamelCase = model.encode(inputs_dict['''input_ids'''] )
_UpperCamelCase , _UpperCamelCase = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
_UpperCamelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_UpperCamelCase = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCamelCase = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , past_key_values=lowerCAmelCase__ , decoder_position_ids=lowerCAmelCase__ , )
_UpperCamelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
_UpperCamelCase = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCAmelCase__ , decoder_position_ids=lowerCAmelCase__ , )
_UpperCamelCase = model.decode(lowerCAmelCase__ , lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ )
_UpperCamelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
_snake_case : List[Any] = 9_9
def snake_case__ ( self : Dict ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
] , dtype=np.intaa , )
_UpperCamelCase = input_ids.shape[0]
_UpperCamelCase = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def snake_case__ ( self : Dict ) -> List[str]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = self._get_config_and_data()
_UpperCamelCase = FlaxBlenderbotForConditionalGeneration(lowerCAmelCase__ )
_UpperCamelCase = lm_model(input_ids=lowerCAmelCase__ )
_UpperCamelCase = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , lowerCAmelCase__ )
def snake_case__ ( self : Any ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = BlenderbotConfig(
vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , )
_UpperCamelCase = FlaxBlenderbotForConditionalGeneration(lowerCAmelCase__ )
_UpperCamelCase = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa )
_UpperCamelCase = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa )
_UpperCamelCase = lm_model(input_ids=lowerCAmelCase__ , decoder_input_ids=lowerCAmelCase__ )
_UpperCamelCase = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['''logits'''].shape , lowerCAmelCase__ )
def snake_case__ ( self : List[str] ) -> Dict:
'''simple docstring'''
_UpperCamelCase = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa )
_UpperCamelCase = shift_tokens_right(lowerCAmelCase__ , 1 , 2 )
_UpperCamelCase = np.equal(lowerCAmelCase__ , 1 ).astype(np.floataa ).sum()
_UpperCamelCase = np.equal(lowerCAmelCase__ , 1 ).astype(np.floataa ).sum()
self.assertEqual(shifted.shape , input_ids.shape )
self.assertEqual(lowerCAmelCase__ , n_pad_before - 1 )
self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() )
@require_flax
class __lowerCAmelCase ( __magic_name__ , unittest.TestCase , __magic_name__ ):
"""simple docstring"""
_snake_case : Dict = True
_snake_case : str = (
(
FlaxBlenderbotModel,
FlaxBlenderbotForConditionalGeneration,
)
if is_flax_available()
else ()
)
_snake_case : Tuple = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
def snake_case__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = FlaxBlenderbotModelTester(self )
def snake_case__ ( self : List[str] ) -> Union[str, Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def snake_case__ ( self : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCamelCase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = model_class(lowerCAmelCase__ )
@jax.jit
def encode_jitted(lowerCAmelCase__ : Union[str, Any] , lowerCAmelCase__ : Dict=None , **lowerCAmelCase__ : Optional[Any] ):
return model.encode(input_ids=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )
with self.subTest('''JIT Enabled''' ):
_UpperCamelCase = encode_jitted(**lowerCAmelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_UpperCamelCase = encode_jitted(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
for jitted_output, output in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def snake_case__ ( self : Dict ) -> str:
'''simple docstring'''
_UpperCamelCase , _UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCamelCase = model_class(lowerCAmelCase__ )
_UpperCamelCase = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
_UpperCamelCase = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : List[Any] ):
return model.decode(
decoder_input_ids=lowerCAmelCase__ , decoder_attention_mask=lowerCAmelCase__ , encoder_outputs=lowerCAmelCase__ , )
with self.subTest('''JIT Enabled''' ):
_UpperCamelCase = decode_jitted(**lowerCAmelCase__ ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
_UpperCamelCase = decode_jitted(**lowerCAmelCase__ ).to_tuple()
self.assertEqual(len(lowerCAmelCase__ ) , len(lowerCAmelCase__ ) )
for jitted_output, output in zip(lowerCAmelCase__ , lowerCAmelCase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def snake_case__ ( self : Dict ) -> Any:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCamelCase = model_class_name.from_pretrained('''facebook/blenderbot-400M-distill''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
_UpperCamelCase = np.ones((1, 1) ) * model.config.eos_token_id
_UpperCamelCase = model(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
@unittest.skipUnless(jax_device != '''cpu''' , '''3B test too slow on CPU.''' )
@slow
def snake_case__ ( self : int ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = {'''num_beams''': 1, '''early_stopping''': True, '''min_length''': 15, '''max_length''': 25}
_UpperCamelCase = {'''skip_special_tokens''': True, '''clean_up_tokenization_spaces''': True}
_UpperCamelCase = FlaxBlenderbotForConditionalGeneration.from_pretrained('''facebook/blenderbot-3B''' , from_pt=lowerCAmelCase__ )
_UpperCamelCase = BlenderbotTokenizer.from_pretrained('''facebook/blenderbot-3B''' )
_UpperCamelCase = ['''Sam''']
_UpperCamelCase = tokenizer(lowerCAmelCase__ , return_tensors='''jax''' )
_UpperCamelCase = model.generate(**lowerCAmelCase__ , **lowerCAmelCase__ )
_UpperCamelCase = '''Sam is a great name. It means "sun" in Gaelic.'''
_UpperCamelCase = tokenizer.batch_decode(lowerCAmelCase__ , **lowerCAmelCase__ )
assert generated_txt[0].strip() == tgt_text
| 98 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
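# Differentiable CLIP pre-processing: resize, center-crop and normalize are done with torchvision ops so gradients can flow back to the image during optimization.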
class ProcessorGradientFlow:
def __init__( self , lowercase_ = "cpu" , lowercase_ = "openai/clip-vit-large-patch14" ) -> None:
UpperCAmelCase = device
UpperCAmelCase = CLIPTokenizerFast.from_pretrained(lowercase_ )
UpperCAmelCase = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3]
UpperCAmelCase = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1]
UpperCAmelCase = torchvision.transforms.Normalize(self.image_mean , self.image_std )
UpperCAmelCase = torchvision.transforms.Resize(2_2_4 )
UpperCAmelCase = torchvision.transforms.CenterCrop(2_2_4 )
def a_ ( self , lowercase_ ) -> Any:
UpperCAmelCase = self.resize(lowercase_ )
UpperCAmelCase = self.center_crop(lowercase_ )
UpperCAmelCase = self.normalize(lowercase_ )
return images
def __call__( self , lowercase_=None , lowercase_=None , **lowercase_ ) -> Union[str, Any]:
UpperCAmelCase = self.tokenizer(text=lowercase_ , **lowercase_ )
UpperCAmelCase = self.preprocess_img(lowercase_ )
UpperCAmelCase = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class _UpperCAmelCase ( nn.Module ):
def __init__( self , lowercase_=1_0 , lowercase_=0.0_1 , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=None , lowercase_=False , lowercase_=True , lowercase_="image" , lowercase_=True , lowercase_=False , lowercase_=False , lowercase_=False , ) -> None:
super().__init__()
UpperCAmelCase = None
UpperCAmelCase = device if device else get_device()
if vqgan:
UpperCAmelCase = vqgan
else:
UpperCAmelCase = load_vqgan(self.device , conf_path=lowercase_ , ckpt_path=lowercase_ )
self.vqgan.eval()
if clip:
UpperCAmelCase = clip
else:
UpperCAmelCase = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' )
self.clip.to(self.device )
UpperCAmelCase = ProcessorGradientFlow(device=self.device )
UpperCAmelCase = iterations
UpperCAmelCase = lr
UpperCAmelCase = log
UpperCAmelCase = make_grid
UpperCAmelCase = return_val
UpperCAmelCase = quantize
UpperCAmelCase = self.vqgan.decoder.z_shape
def a_ ( self , lowercase_=None , lowercase_=None , lowercase_=5 , lowercase_=True ) -> Dict:
UpperCAmelCase = []
if output_path is None:
UpperCAmelCase = './animation.gif'
if input_path is None:
UpperCAmelCase = self.save_path
UpperCAmelCase = sorted(glob(input_path + '/*' ) )
if not len(lowercase_ ):
raise ValueError(
'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
' function?)' )
if len(lowercase_ ) == 1:
print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' )
UpperCAmelCase = total_duration / len(lowercase_ )
UpperCAmelCase = [frame_duration] * len(lowercase_ )
if extend_frames:
UpperCAmelCase = 1.5
UpperCAmelCase = 3
for file_name in paths:
if file_name.endswith('.png' ):
images.append(imageio.imread(lowercase_ ) )
imageio.mimsave(lowercase_ , lowercase_ , duration=lowercase_ )
print(F"gif saved to {output_path}" )
def a_ ( self , lowercase_=None , lowercase_=None ) -> List[Any]:
if not (path or img):
raise ValueError('Input either path or tensor' )
if img is not None:
raise NotImplementedError
UpperCAmelCase = preprocess(Image.open(lowercase_ ) , target_image_size=2_5_6 ).to(self.device )
UpperCAmelCase = preprocess_vqgan(lowercase_ )
UpperCAmelCase , *UpperCAmelCase = self.vqgan.encode(lowercase_ )
return z
def a_ ( self , lowercase_ ) -> Optional[int]:
UpperCAmelCase = self.latent.detach().requires_grad_()
UpperCAmelCase = base_latent + transform_vector
if self.quantize:
UpperCAmelCase , *UpperCAmelCase = self.vqgan.quantize(lowercase_ )
else:
UpperCAmelCase = trans_latent
return self.vqgan.decode(lowercase_ )
def a_ ( self , lowercase_ , lowercase_ , lowercase_=None ) -> str:
UpperCAmelCase = self.clip_preprocessor(text=lowercase_ , images=lowercase_ , return_tensors='pt' , padding=lowercase_ )
UpperCAmelCase = self.clip(**lowercase_ )
UpperCAmelCase = clip_outputs.logits_per_image
if weights is not None:
UpperCAmelCase = similarity_logits * weights
return similarity_logits.sum()
def a_ ( self , lowercase_ , lowercase_ , lowercase_ ) -> List[Any]:
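        # CLIP guidance loss: reward similarity to the positive prompts and penalise similarity to the negative prompts.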
UpperCAmelCase = self._get_clip_similarity(pos_prompts['prompts'] , lowercase_ , weights=(1 / pos_prompts['weights']) )
if neg_prompts:
UpperCAmelCase = self._get_clip_similarity(neg_prompts['prompts'] , lowercase_ , weights=neg_prompts['weights'] )
else:
UpperCAmelCase = torch.tensor([1] , device=self.device )
UpperCAmelCase = -torch.log(lowercase_ ) + torch.log(lowercase_ )
return loss
def a_ ( self , lowercase_ , lowercase_ , lowercase_ ) -> Tuple:
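        # Optimise a random latent offset with Adam so that the decoded image minimises the CLIP loss above.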
UpperCAmelCase = torch.randn_like(self.latent , requires_grad=lowercase_ , device=self.device )
UpperCAmelCase = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
UpperCAmelCase = self._add_vector(lowercase_ )
UpperCAmelCase = loop_post_process(lowercase_ )
UpperCAmelCase = self._get_CLIP_loss(lowercase_ , lowercase_ , lowercase_ )
print('CLIP loss' , lowercase_ )
if self.log:
wandb.log({'CLIP Loss': clip_loss} )
clip_loss.backward(retain_graph=lowercase_ )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def a_ ( self , lowercase_ , lowercase_ , lowercase_ ) -> Dict:
wandb.init(reinit=lowercase_ , project='face-editor' )
wandb.config.update({'Positive Prompts': positive_prompts} )
wandb.config.update({'Negative Prompts': negative_prompts} )
wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
if image_path:
UpperCAmelCase = Image.open(lowercase_ )
UpperCAmelCase = image.resize((2_5_6, 2_5_6) )
            wandb.log({'Original Image': wandb.Image(lowercase_ )} )
def a_ ( self , lowercase_ ) -> Tuple:
if not prompts:
return []
UpperCAmelCase = []
UpperCAmelCase = []
if isinstance(lowercase_ , lowercase_ ):
UpperCAmelCase = [prompt.strip() for prompt in prompts.split('|' )]
for prompt in prompts:
if isinstance(lowercase_ , (tuple, list) ):
UpperCAmelCase = prompt[0]
UpperCAmelCase = float(prompt[1] )
elif ":" in prompt:
UpperCAmelCase , UpperCAmelCase = prompt.split(':' )
UpperCAmelCase = float(lowercase_ )
else:
UpperCAmelCase = prompt
UpperCAmelCase = 1.0
processed_prompts.append(lowercase_ )
weights.append(lowercase_ )
return {
"prompts": processed_prompts,
"weights": torch.tensor(lowercase_ , device=self.device ),
}
def a_ ( self , lowercase_ , lowercase_=None , lowercase_=None , lowercase_=True , lowercase_=False , lowercase_=True , lowercase_=True , lowercase_=None , ) -> List[str]:
if image_path:
UpperCAmelCase = self._get_latent(lowercase_ )
else:
UpperCAmelCase = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(lowercase_ , lowercase_ , lowercase_ )
assert pos_prompts, "You must provide at least one positive prompt."
UpperCAmelCase = self.process_prompts(lowercase_ )
UpperCAmelCase = self.process_prompts(lowercase_ )
if save_final and save_path is None:
UpperCAmelCase = os.path.join('./outputs/' , '_'.join(pos_prompts['prompts'] ) )
if not os.path.exists(lowercase_ ):
os.makedirs(lowercase_ )
else:
UpperCAmelCase = save_path + '_' + get_timestamp()
os.makedirs(lowercase_ )
UpperCAmelCase = save_path
UpperCAmelCase = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('Original Image' )
show_pil(custom_to_pil(lowercase_ ) )
UpperCAmelCase = loop_post_process(lowercase_ )
for iter, transformed_img in enumerate(self._optimize_CLIP(lowercase_ , lowercase_ , lowercase_ ) ):
if show_intermediate:
show_pil(lowercase_ )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , F"iter_{iter:03d}.png" ) )
if self.log:
wandb.log({'Image': wandb.Image(lowercase_ )} )
if show_final:
show_pil(lowercase_ )
if save_final:
transformed_img.save(os.path.join(self.save_path , F"iter_{iter:03d}_final.png" ) )
| 373 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
a_ :Dict = None
a_ :List[str] = logging.get_logger(__name__)
a_ :Dict = "▁"
a_ :Dict = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
a_ :Union[str, Any] = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
"tokenizer_file": {
"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
},
}
a_ :List[Any] = {
"google/pegasus-xsum": 512,
}
class snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE = PegasusTokenizer
_SCREAMING_SNAKE_CASE = ["""input_ids""", """attention_mask"""]
def __init__( self : Any, _snake_case : Any=None, _snake_case : Optional[Any]=None, _snake_case : Tuple="<pad>", _snake_case : Tuple="</s>", _snake_case : List[str]="<unk>", _snake_case : Any="<mask_2>", _snake_case : Optional[Any]="<mask_1>", _snake_case : Tuple=None, _snake_case : str=1_0_3, **_snake_case : Dict, ) ->List[Any]:
snake_case__ : Any = offset
if additional_special_tokens is not None:
if not isinstance(_snake_case, _snake_case ):
raise TypeError(
F'''additional_special_tokens should be of type {type(_snake_case )}, but is'''
F''' {type(_snake_case )}''' )
snake_case__ : int = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
F'''<unk_{i}>''' for i in range(len(_snake_case ), self.offset - 1 )
]
if len(set(_snake_case ) ) != len(_snake_case ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
F''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
snake_case__ : Optional[int] = additional_special_tokens_extended
else:
snake_case__ : Optional[Any] = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [F'''<unk_{i}>''' for i in range(2, self.offset )]
super().__init__(
_snake_case, tokenizer_file=_snake_case, pad_token=_snake_case, eos_token=_snake_case, unk_token=_snake_case, mask_token=_snake_case, mask_token_sent=_snake_case, offset=_snake_case, additional_special_tokens=_snake_case, **_snake_case, )
snake_case__ : str = vocab_file
snake_case__ : int = False if not self.vocab_file else True
def lowercase_ ( self : Optional[Any], _snake_case : str ) ->Dict:
snake_case__ : int = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
F''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def lowercase_ ( self : Dict, _snake_case : List, _snake_case : Optional[List] = None, _snake_case : bool = False ) ->List[int]:
if already_has_special_tokens:
return self._special_token_mask(_snake_case )
elif token_ids_a is None:
return self._special_token_mask(_snake_case ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def lowercase_ ( self : Any, _snake_case : Union[str, Any], _snake_case : Union[str, Any]=None ) ->List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def lowercase_ ( self : Optional[Any], _snake_case : str, _snake_case : Optional[str] = None ) ->Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_snake_case ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : List[Any] = os.path.join(
_snake_case, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ):
copyfile(self.vocab_file, _snake_case )
return (out_vocab_file,)
| 243 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class snake_case__ ( lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = KandinskyInpaintPipeline
_SCREAMING_SNAKE_CASE = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image""", """mask_image"""]
_SCREAMING_SNAKE_CASE = [
"""prompt""",
"""negative_prompt""",
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
"""mask_image""",
]
_SCREAMING_SNAKE_CASE = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""negative_prompt""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
_SCREAMING_SNAKE_CASE = False
@property
def lowercase_ ( self : Optional[Any] ) ->Optional[Any]:
return 3_2
@property
def lowercase_ ( self : int ) ->str:
return 3_2
@property
def lowercase_ ( self : Any ) ->List[str]:
return self.time_input_dim
@property
def lowercase_ ( self : Optional[Any] ) ->str:
return self.time_input_dim * 4
@property
def lowercase_ ( self : Tuple ) ->int:
return 1_0_0
@property
def lowercase_ ( self : str ) ->Dict:
snake_case__ : Union[str, Any] = XLMRobertaTokenizerFast.from_pretrained('YiYiXu/tiny-random-mclip-base' )
return tokenizer
@property
def lowercase_ ( self : Any ) ->Optional[int]:
torch.manual_seed(0 )
snake_case__ : str = MCLIPConfig(
numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=3_7, num_attention_heads=4, num_hidden_layers=5, vocab_size=1_0_0_5, )
snake_case__ : Optional[Any] = MultilingualCLIP(_snake_case )
snake_case__ : List[Any] = text_encoder.eval()
return text_encoder
@property
def lowercase_ ( self : Tuple ) ->Optional[int]:
torch.manual_seed(0 )
snake_case__ : Optional[Any] = {
'in_channels': 9,
            # Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'text_image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'text_image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
snake_case__ : Dict = UNetaDConditionModel(**_snake_case )
return model
@property
def lowercase_ ( self : Dict ) ->Optional[int]:
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase_ ( self : Union[str, Any] ) ->List[Any]:
torch.manual_seed(0 )
snake_case__ : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase_ ( self : Any ) ->Any:
snake_case__ : int = self.dummy_text_encoder
snake_case__ : str = self.dummy_tokenizer
snake_case__ : Any = self.dummy_unet
snake_case__ : Tuple = self.dummy_movq
snake_case__ : int = DDIMScheduler(
num_train_timesteps=1_0_0_0, beta_schedule='linear', beta_start=0.0_0_0_8_5, beta_end=0.0_1_2, clip_sample=_snake_case, set_alpha_to_one=_snake_case, steps_offset=1, prediction_type='epsilon', thresholding=_snake_case, )
snake_case__ : Optional[int] = {
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def lowercase_ ( self : str, _snake_case : Any, _snake_case : int=0 ) ->str:
snake_case__ : Union[str, Any] = floats_tensor((1, self.cross_attention_dim), rng=random.Random(_snake_case ) ).to(_snake_case )
snake_case__ : str = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1 ) ).to(_snake_case )
# create init_image
snake_case__ : Tuple = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(_snake_case ) ).to(_snake_case )
snake_case__ : Optional[Any] = image.cpu().permute(0, 2, 3, 1 )[0]
snake_case__ : Tuple = Image.fromarray(np.uinta(_snake_case ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create mask
snake_case__ : Any = np.ones((6_4, 6_4), dtype=np.floataa )
snake_case__ : Optional[Any] = 0
if str(_snake_case ).startswith('mps' ):
snake_case__ : Union[str, Any] = torch.manual_seed(_snake_case )
else:
snake_case__ : Any = torch.Generator(device=_snake_case ).manual_seed(_snake_case )
snake_case__ : int = {
'prompt': 'horse',
'image': init_image,
'mask_image': mask,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 2,
'guidance_scale': 4.0,
'output_type': 'np',
}
return inputs
def lowercase_ ( self : Optional[int] ) ->Optional[Any]:
snake_case__ : int = 'cpu'
snake_case__ : str = self.get_dummy_components()
snake_case__ : Any = self.pipeline_class(**_snake_case )
snake_case__ : Optional[Any] = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
snake_case__ : Tuple = pipe(**self.get_dummy_inputs(_snake_case ) )
snake_case__ : List[Any] = output.images
snake_case__ : List[Any] = pipe(
**self.get_dummy_inputs(_snake_case ), return_dict=_snake_case, )[0]
snake_case__ : Optional[int] = image[0, -3:, -3:, -1]
snake_case__ : int = image_from_tuple[0, -3:, -3:, -1]
print(F'''image.shape {image.shape}''' )
assert image.shape == (1, 6_4, 6_4, 3)
snake_case__ : Any = np.array(
[0.8_3_2_6_9_1_9, 0.7_3_7_9_0_4_6_7, 0.2_0_9_1_8_5_8_1, 0.9_3_0_9_6_1_2, 0.5_5_1_1_7_9_1, 0.4_3_7_1_3_3_2_8, 0.5_5_1_3_3_2_1, 0.4_9_9_2_2_9_3_4, 0.5_9_4_9_7_7_8_6] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def lowercase_ ( self : Any ) ->List[Any]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
def lowercase_ ( self : Dict ) ->int:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase_ ( self : Optional[int] ) ->List[str]:
snake_case__ : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy' )
snake_case__ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
snake_case__ : Union[str, Any] = np.ones((7_6_8, 7_6_8), dtype=np.floataa )
snake_case__ : str = 0
snake_case__ : List[str] = 'a hat'
snake_case__ : Any = KandinskyPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-prior', torch_dtype=torch.floataa )
pipe_prior.to(_snake_case )
snake_case__ : Union[str, Any] = KandinskyInpaintPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-1-inpaint', torch_dtype=torch.floataa )
snake_case__ : Tuple = pipeline.to(_snake_case )
pipeline.set_progress_bar_config(disable=_snake_case )
snake_case__ : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
snake_case__ , snake_case__ : Tuple = pipe_prior(
_snake_case, generator=_snake_case, num_inference_steps=5, negative_prompt='', ).to_tuple()
snake_case__ : Optional[Any] = pipeline(
_snake_case, image=_snake_case, mask_image=_snake_case, image_embeds=_snake_case, negative_image_embeds=_snake_case, generator=_snake_case, num_inference_steps=1_0_0, height=7_6_8, width=7_6_8, output_type='np', )
snake_case__ : Dict = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(_snake_case, _snake_case )
| 243 | 1 |
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class snake_case__ (unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__( self ) -> Any:
"""simple docstring"""
a__ : List[str] = tf.convert_to_tensor(
[
[
8.2_2_2_0_9_9_1, # 3rd highest value; idx. 0
-0.5_6_2_0_0_4_4,
5.2_3_2_2_9_7_5_2,
4.0_3_8_6_3_9_3,
-6.8_7_9_8_3_7_8,
-0.5_4_7_8_5_8_0_2,
-3.2_0_1_2_1_5_3,
2.9_2_7_7_7_1_7_6,
1.8_8_1_7_1_9_5_3,
7.3_5_3_4_1_2_7_6, # 5th highest value; idx. 9
8.4_3_2_0_7_8_3_3, # 2nd highest value; idx. 10
-9.8_5_7_1_1_8_3_6,
-5.9_6_2_0_9_2_3_6,
-1.1_3_0_3_9_1_6_1,
-7.1_1_1_5_2_9_4,
-0.8_3_6_9_6_3_3,
-5.3_1_8_6_4_0_8,
7.0_6_4_2_7_4_0_7,
0.8_1_3_6_9_3_4_4,
-0.8_2_0_2_3_8_1_7,
-5.9_1_7_9_7_9_6,
0.5_8_8_1_3_4_4_3,
-6.9_9_7_7_8_4_3_8,
4.7_1_5_5_1_1_8_9,
-0.1_8_7_7_1_6_3_7,
7.4_4_0_2_0_7_5_9, # 4th highest value; idx. 25
9.3_8_4_5_0_9_8_7, # 1st highest value; idx. 26
2.1_2_6_6_2_9_4_1,
-9.3_2_5_6_2_0_3_8,
2.3_5_6_5_2_5_2_2,
], # cummulative prob of 5 highest values <= 0.6
[
0.5_8_4_2_5_5_1_8,
4.5_3_1_3_9_2_3_8,
-5.5_7_5_1_0_4_6_4,
-6.2_8_0_3_0_6_9_9,
-7.1_9_5_2_9_5_0_3,
-4.0_2_1_2_2_5_5_1,
1.3_9_3_3_7_0_3_7,
-6.0_6_7_0_7_0_5_7,
1.5_9_4_8_0_5_1_7,
-9.6_4_3_1_1_9,
0.0_3_9_0_7_7_9_9,
0.6_7_2_3_1_7_6_2,
-8.8_8_2_0_6_7_2_6,
6.2_7_1_1_5_9_2_2, # 4th highest value; idx. 13
2.2_8_5_2_0_7_2_3,
4.8_2_7_6_7_5_0_6,
4.3_0_4_2_1_3_6_8,
8.8_2_7_5_3_1_3, # 2nd highest value; idx. 17
5.4_4_0_2_9_9_5_8, # 5th highest value; idx. 18
-4.4_7_3_5_7_9_4,
7.3_8_5_7_9_5_3_6, # 3rd highest value; idx. 20
-2.9_1_0_5_1_6_6_3,
2.6_1_9_4_6_0_7_7,
-2.5_6_7_4_7_6_2,
-9.4_8_9_5_9_3_0_2,
-4.0_2_9_2_2_6_4_5,
-1.3_5_4_1_6_9_1_8,
9.6_7_7_0_2_3_2_3, # 1st highest value; idx. 27
-5.8_9_4_7_8_5_5_3,
1.8_5_3_7_0_4_6_7,
], # cummulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
a__ : Dict = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 1_0], [0, 2_5], [0, 2_6], [1, 1_3], [1, 1_7], [1, 1_8], [1, 2_0], [1, 2_7]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
a__ : Dict = tf.convert_to_tensor(
[8.2_2_2_0_9_9, 7.3_5_3_4_1_2_6, 8.4_3_2_0_7_8, 7.4_4_0_2_0_7_5, 9.3_8_4_5_1, 6.2_7_1_1_5_9, 8.8_2_7_5_3_1, 5.4_4_0_2_9_9_5, 7.3_8_5_7_9_5_6, 9.6_7_7_0_2_3] , dtype=tf.floataa , ) # expected non filtered values as noted above
a__ : Union[str, Any] = tf_top_k_top_p_filtering(__lowercase , top_k=1_0 , top_p=0.6 , min_tokens_to_keep=4 )
a__ : Optional[int] = output[output != -float("""inf""" )]
a__ : Any = tf.cast(
tf.where(tf.not_equal(__lowercase , tf.constant(-float("""inf""" ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )
tf.debugging.assert_near(__lowercase , __lowercase , rtol=1E-12 )
tf.debugging.assert_equal(__lowercase , __lowercase )
@require_tf
class snake_case__ (unittest.TestCase , A__ ):
"""simple docstring"""
if is_tf_available():
__lowerCAmelCase :Tuple = {
"AutoModelForCausalLM": TFAutoModelForCausalLM,
"AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
"AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
"AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
"LogitsProcessorList": TFLogitsProcessorList,
"MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
"create_tensor_fn": tf.convert_to_tensor,
"floats_tensor": floats_tensor,
"return_tensors": "tf",
}
@slow
def SCREAMING_SNAKE_CASE__( self ) -> Any:
"""simple docstring"""
a__ : Optional[int] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
a__ : Tuple = 2
a__ : Union[str, Any] = 2
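        # Wrap the model in a tf.Module so that generate() can be exported as a SavedModel serving signature with a fixed input length.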
        class DummyModel(tf.Module ):
"""simple docstring"""
def __init__( self , __lowercase ) -> Optional[Any]:
"""simple docstring"""
super(__lowercase , self ).__init__()
a__ : Optional[int] = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=__lowercase , )
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase ) -> Optional[Any]:
"""simple docstring"""
a__ : Optional[Any] = self.model.generate(
input_ids=__lowercase , attention_mask=__lowercase , max_new_tokens=__lowercase , return_dict_in_generate=__lowercase , )
return {"sequences": outputs["sequences"]}
a__ : Optional[Any] = [[2, 0], [1_0_2, 1_0_3]]
a__ : int = [[1, 0], [1, 1]]
a__ : Union[str, Any] = DummyModel(model=__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__lowercase , __lowercase , signatures={"""serving_default""": dummy_model.serving} )
a__ : Tuple = tf.saved_model.load(__lowercase ).signatures["""serving_default"""]
for batch_size in range(1 , len(__lowercase ) + 1 ):
a__ : Dict = {
"""input_ids""": tf.constant(dummy_input_ids[:batch_size] ),
"""attention_mask""": tf.constant(dummy_attention_masks[:batch_size] ),
}
a__ : str = serving_func(**__lowercase )["""sequences"""]
a__ : Optional[Any] = test_model.generate(**__lowercase , max_new_tokens=__lowercase )
tf.debugging.assert_equal(__lowercase , __lowercase )
@slow
def SCREAMING_SNAKE_CASE__( self ) -> str:
"""simple docstring"""
a__ : Dict = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
a__ : Any = 1
a__ : Any = 2
        class DummyModel(tf.Module ):
"""simple docstring"""
def __init__( self , __lowercase ) -> Optional[int]:
"""simple docstring"""
super(__lowercase , self ).__init__()
a__ : int = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids""" ),
tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask""" ),
) , jit_compile=__lowercase , )
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase ) -> Any:
"""simple docstring"""
a__ : Optional[int] = self.model.generate(
input_ids=__lowercase , attention_mask=__lowercase , max_new_tokens=__lowercase , return_dict_in_generate=__lowercase , )
return {"sequences": outputs["sequences"]}
a__ : Optional[int] = [[2], [1_0_2, 1_0_3]]
a__ : Any = [[1], [1, 1]]
a__ : int = DummyModel(model=__lowercase )
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(__lowercase , __lowercase , signatures={"""serving_default""": dummy_model.serving} )
a__ : Optional[int] = tf.saved_model.load(__lowercase ).signatures["""serving_default"""]
for input_row in range(len(__lowercase ) ):
a__ : Optional[int] = {
"""input_ids""": tf.constant([dummy_input_ids[input_row]] ),
"""attention_mask""": tf.constant([dummy_attention_masks[input_row]] ),
}
a__ : int = serving_func(**__lowercase )["""sequences"""]
a__ : List[Any] = test_model.generate(**__lowercase , max_new_tokens=__lowercase )
tf.debugging.assert_equal(__lowercase , __lowercase )
@slow
@require_tensorflow_text
def SCREAMING_SNAKE_CASE__( self ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=__lowercase )
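            # End-to-end Keras layer: tokenize with tensorflow_text, generate, and detokenize inside the TF graph so the whole pipeline can be saved as a Keras model.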
            class CompleteSentenceTransformer(tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self ) -> List[str]:
"""simple docstring"""
super().__init__()
a__ : Any = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(__lowercase , """spiece.model""" ) , """rb""" ).read() )
a__ : str = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""" )
def SCREAMING_SNAKE_CASE__( self , __lowercase , *__lowercase , **__lowercase ) -> str:
"""simple docstring"""
a__ : Optional[int] = self.tokenizer.tokenize(__lowercase )
a__ , a__ : Dict = text.pad_model_inputs(
__lowercase , max_seq_length=6_4 , pad_value=self.model.config.pad_token_id )
a__ : List[str] = self.model.generate(input_ids=__lowercase , attention_mask=__lowercase )
return self.tokenizer.detokenize(__lowercase )
a__ : int = CompleteSentenceTransformer()
a__ : Dict = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""" )
a__ : Dict = complete_model(__lowercase )
a__ : List[Any] = tf.keras.Model(__lowercase , __lowercase )
keras_model.save(__lowercase )
def SCREAMING_SNAKE_CASE__( self ) -> str:
"""simple docstring"""
a__ : List[str] = {
"""do_sample""": True,
"""num_beams""": 1,
"""top_p""": 0.7,
"""top_k""": 1_0,
"""temperature""": 0.7,
}
a__ : Optional[Any] = 1_4
a__ : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
a__ : Dict = """Hello, my dog is cute and"""
a__ : Union[str, Any] = tokenizer(__lowercase , return_tensors="""tf""" )
a__ : Optional[Any] = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
a__ : str = 6_3_8
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
a__ : Tuple = model.generate(**__lowercase , eos_token_id=__lowercase , **__lowercase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
a__ : Optional[int] = [6_3_8, 1_9_8]
with tf.device(""":/CPU:0""" ):
tf.random.set_seed(0 )
a__ : str = model.generate(**__lowercase , eos_token_id=__lowercase , **__lowercase )
self.assertTrue(expectation == len(generated_tokens[0] ) )
def SCREAMING_SNAKE_CASE__( self ) -> List[Any]:
"""simple docstring"""
a__ : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
a__ : str = """Hugging Face is a technology company based in New York and Paris."""
a__ : List[str] = bart_tokenizer(__lowercase , return_tensors="""tf""" ).input_ids
a__ : Union[str, Any] = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
a__ : List[str] = bart_model.generate(__lowercase ).numpy()
class snake_case__ (A__ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__( self , __lowercase , __lowercase=None , **__lowercase ) -> Union[str, Any]:
"""simple docstring"""
return super().call(__lowercase , **__lowercase )
a__ : Union[str, Any] = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""" )
a__ : List[str] = bart_model.generate(__lowercase , foo="""bar""" ).numpy()
self.assertTrue(np.array_equal(__lowercase , __lowercase ) )
class snake_case__ (bart_model.model.encoder.__class__ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__( self , __lowercase , **__lowercase ) -> Union[str, Any]:
"""simple docstring"""
return super().call(__lowercase , **__lowercase )
a__ : Any = FakeEncoder(bart_model.config , bart_model.model.shared )
a__ : int = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
a__ : Union[str, Any] = bart_model.generate(__lowercase ).numpy()
with self.assertRaises(__lowercase ):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(__lowercase , foo="""bar""" )
| 136 |
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline

parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
| 136 | 1 |
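The script above applies Intel Extension for PyTorch (IPEX) to every sub-module of a Stable Diffusion pipeline. The core pattern is independent of diffusers: optimize an eval-mode module once (optionally with a sample input so IPEX can specialize kernels for realistic shapes), then run inference inside a CPU bfloat16 autocast region. A minimal sketch of that pattern on a placeholder module, assuming a recent intel_extension_for_pytorch is installed:

import intel_extension_for_pytorch as ipex
import torch

# Placeholder network standing in for pipe.unet / pipe.vae / pipe.text_encoder.
model = torch.nn.Sequential(torch.nn.Linear(64, 64), torch.nn.ReLU()).eval()
example_input = torch.randn(8, 64)

# One-time optimization; sample_input lets IPEX prepack weights for these shapes.
model = ipex.optimize(model, dtype=torch.bfloat16, inplace=True, sample_input=example_input)

# Inference runs under a CPU bfloat16 autocast, mirroring the generation loop above.
with torch.no_grad(), torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    out = model(example_input)
print(out.shape)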
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__UpperCAmelCase : Dict = logging.get_logger(__name__)
__UpperCAmelCase : str = {
"speechbrain/m-ctc-t-large": "https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json",
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class __snake_case ( PretrainedConfig ):
    '''simple docstring'''

    model_type = """mctct"""

    def __init__( self , vocab_size=8_065 , hidden_size=1_536 , num_hidden_layers=36 , intermediate_size=6_144 , num_attention_heads=4 , attention_head_dim=384 , max_position_embeddings=920 , layer_norm_eps=1E-5 , layerdrop=0.3 , hidden_act="relu" , initializer_range=0.02 , hidden_dropout_prob=0.3 , attention_probs_dropout_prob=0.3 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , conv_glu_dim=1 , conv_dropout=0.3 , num_conv_layers=1 , conv_kernel=(7,) , conv_stride=(3,) , input_feat_per_channel=80 , input_channels=1 , conv_channels=None , ctc_loss_reduction="sum" , ctc_zero_infinity=False , **kwargs , ):
        super().__init__(**kwargs , pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        self.max_position_embeddings = max_position_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.layerdrop = layerdrop
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.conv_glu_dim = conv_glu_dim
        self.conv_dropout = conv_dropout
        self.num_conv_layers = num_conv_layers
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels
        self.conv_channels = conv_channels
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # prevents config testing fail with exporting to json
        self.conv_kernel = list(conv_kernel )
        self.conv_stride = list(conv_stride )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel)` == `config.num_conv_layers` """
f'''but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, '''
f'''`config.num_conv_layers = {self.num_conv_layers}`.''' )
| 155 |
def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term that contains n digits."""
    fa, fb = 1, 1
    index = 2
    while True:
        i = 0
        f = fa + fb
        fa, fb = fb, f
        index += 1
        for _ in str(f):
            i += 1
        if i == n:
            break
    return index


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
| 155 | 1 |
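The snippet above is the classic "index of the first Fibonacci term with n digits" exercise: it keeps only the last two terms, counts the digits of each new term, and stops once the count reaches n. A quick sanity check, assuming the `solution` function above is in scope:

# F(12) = 144 is the first Fibonacci number with three digits.
assert solution(3) == 12
# The default argument asks for the first term with 1000 digits
# (4782 for the Project Euler version of the problem).
print(solution())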
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=True )
class snake_case__ ( TaskTemplate ):
"""simple docstring"""
lowerCamelCase = field(default="""audio-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
lowerCamelCase = Features({"""audio""": Audio()} )
lowerCamelCase = Features({"""labels""": ClassLabel} )
lowerCamelCase = """audio"""
lowerCamelCase = """labels"""
    def lowerCAmelCase ( self : Optional[Any] , features : Features ) -> Dict:
        """simple docstring"""
        if self.label_column not in features:
            raise ValueError(f'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
@property
def lowerCAmelCase ( self : Tuple ) -> Dict[str, str]:
"""simple docstring"""
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 638 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaInpaintPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase_ ( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
__snake_case = KandinskyVaaInpaintPipeline
__snake_case = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
__snake_case = [
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
__snake_case = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
__snake_case = False
@property
def __lowerCAmelCase ( self : Optional[int] ) ->Tuple:
"""simple docstring"""
return 32
@property
def __lowerCAmelCase ( self : Optional[Any] ) ->Dict:
"""simple docstring"""
return 32
@property
def __lowerCAmelCase ( self : List[Any] ) ->Union[str, Any]:
"""simple docstring"""
return self.time_input_dim
@property
def __lowerCAmelCase ( self : Tuple ) ->Optional[Any]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def __lowerCAmelCase ( self : Any ) ->Optional[int]:
"""simple docstring"""
return 100
@property
def __lowerCAmelCase ( self : Tuple ) ->Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
a = {
'''in_channels''': 9,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
a = UNetaDConditionModel(**__UpperCAmelCase )
return model
@property
def __lowerCAmelCase ( self : List[Any] ) ->List[Any]:
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowerCAmelCase ( self : str ) ->List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
a = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
a = self.dummy_unet
a = self.dummy_movq
a = DDIMScheduler(
num_train_timesteps=1_000 , beta_schedule='''linear''' , beta_start=0.00085 , beta_end=0.012 , clip_sample=__UpperCAmelCase , set_alpha_to_one=__UpperCAmelCase , steps_offset=1 , prediction_type='''epsilon''' , thresholding=__UpperCAmelCase , )
a = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def __lowerCAmelCase ( self : Dict , __UpperCAmelCase : List[str] , __UpperCAmelCase : int=0 ) ->Optional[int]:
"""simple docstring"""
a = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
a = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
__UpperCAmelCase )
# create init_image
a = floats_tensor((1, 3, 64, 64) , rng=random.Random(__UpperCAmelCase ) ).to(__UpperCAmelCase )
a = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        a = Image.fromarray(np.uint8(__UpperCAmelCase ) ).convert('''RGB''' ).resize((256, 256) )
        # create mask
        a = np.ones((64, 64) , dtype=np.float32 )
a = 0
if str(__UpperCAmelCase ).startswith('''mps''' ):
a = torch.manual_seed(__UpperCAmelCase )
else:
a = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase )
a = {
'''image''': init_image,
'''mask_image''': mask,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 2,
'''guidance_scale''': 4.0,
'''output_type''': '''np''',
}
return inputs
def __lowerCAmelCase ( self : Dict ) ->int:
"""simple docstring"""
a = '''cpu'''
a = self.get_dummy_components()
a = self.pipeline_class(**__UpperCAmelCase )
a = pipe.to(__UpperCAmelCase )
pipe.set_progress_bar_config(disable=__UpperCAmelCase )
a = pipe(**self.get_dummy_inputs(__UpperCAmelCase ) )
a = output.images
a = pipe(
**self.get_dummy_inputs(__UpperCAmelCase ) , return_dict=__UpperCAmelCase , )[0]
a = image[0, -3:, -3:, -1]
a = image_from_tuple[0, -3:, -3:, -1]
print(F"""image.shape {image.shape}""" )
assert image.shape == (1, 64, 64, 3)
a = np.array(
[0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
def __lowerCAmelCase ( self : Union[str, Any] ) ->Optional[int]:
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Any ) ->List[Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self : List[str] ) ->str:
"""simple docstring"""
a = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy''' )
a = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
        a = np.ones((768, 768) , dtype=np.float32 )
        a = 0
        a = '''a hat'''
        a = KandinskyVaaPriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.float16 )
        pipe_prior.to(__UpperCAmelCase )
        a = KandinskyVaaInpaintPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-decoder-inpaint''' , torch_dtype=torch.float16 )
a = pipeline.to(__UpperCAmelCase )
pipeline.set_progress_bar_config(disable=__UpperCAmelCase )
a = torch.Generator(device='''cpu''' ).manual_seed(0 )
a , a = pipe_prior(
__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
a = pipeline(
image=__UpperCAmelCase , mask_image=__UpperCAmelCase , image_embeds=__UpperCAmelCase , negative_image_embeds=__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=100 , height=768 , width=768 , output_type='''np''' , )
a = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__UpperCAmelCase , __UpperCAmelCase )
| 117 | 0 |
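A pattern that recurs throughout the pipeline test above is the smoke check on a 3x3 corner of the generated image: the test flattens `image[0, -3:, -3:, -1]` and compares it to a hard-coded expected slice with a loose tolerance. The check in isolation, with made-up values standing in for real pipeline output:

import numpy as np

image = np.random.RandomState(0).rand(1, 64, 64, 3)   # stand-in for pipe(**inputs).images
image_slice = image[0, -3:, -3:, -1].flatten()
expected_slice = image_slice.copy()                    # in the real test this is hard-coded
assert image_slice.shape == (9,)
assert np.abs(image_slice - expected_slice).max() < 1e-2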
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE( SchedulerCommonTest ):
snake_case_ : Tuple = (DPMSolverSinglestepScheduler,)
snake_case_ : int = (("""num_inference_steps""", 25),)
def snake_case__ ( self , **lowerCamelCase__ ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = {
"""num_train_timesteps""": 1000,
"""beta_start""": 0.00_01,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
"""sample_max_value""": 1.0,
"""algorithm_type""": """dpmsolver++""",
"""solver_type""": """midpoint""",
"""lambda_min_clipped""": -float("""inf""" ),
"""variance_type""": None,
}
config.update(**lowerCamelCase__ )
return config
def snake_case__ ( self , lowerCamelCase__=0 , **lowerCamelCase__ ) -> Any:
"""simple docstring"""
__lowercase = dict(self.forward_default_kwargs )
__lowercase = kwargs.pop("""num_inference_steps""" , lowerCamelCase__ )
__lowercase = self.dummy_sample
__lowercase = 0.1 * sample
__lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__lowercase = self.get_scheduler_config(**lowerCamelCase__ )
__lowercase = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
__lowercase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
__lowercase = scheduler_class.from_pretrained(lowerCamelCase__ )
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals
__lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowercase ,__lowercase = sample, sample
for t in range(lowerCamelCase__ , time_step + scheduler.config.solver_order + 1 ):
__lowercase = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
__lowercase = new_scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self ) -> str:
"""simple docstring"""
pass
def snake_case__ ( self , lowerCamelCase__=0 , **lowerCamelCase__ ) -> Tuple:
"""simple docstring"""
__lowercase = dict(self.forward_default_kwargs )
__lowercase = kwargs.pop("""num_inference_steps""" , lowerCamelCase__ )
__lowercase = self.dummy_sample
__lowercase = 0.1 * sample
__lowercase = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__lowercase = self.get_scheduler_config()
__lowercase = scheduler_class(**lowerCamelCase__ )
scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
__lowercase = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase__ )
__lowercase = scheduler_class.from_pretrained(lowerCamelCase__ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
__lowercase = dummy_past_residuals[: new_scheduler.config.solver_order]
__lowercase = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
__lowercase = new_scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def snake_case__ ( self , lowerCamelCase__=None , **lowerCamelCase__ ) -> Optional[int]:
"""simple docstring"""
if scheduler is None:
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(**lowerCamelCase__ )
__lowercase = scheduler_class(**lowerCamelCase__ )
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(**lowerCamelCase__ )
__lowercase = scheduler_class(**lowerCamelCase__ )
__lowercase = 10
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
__lowercase = model(lowerCamelCase__ , lowerCamelCase__ )
__lowercase = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
return sample
def snake_case__ ( self ) -> str:
"""simple docstring"""
__lowercase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__lowercase = 50
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase__ )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
__lowercase = model(lowerCamelCase__ , lowerCamelCase__ )
__lowercase = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
__lowercase = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.25_74 ) < 1E-3
def snake_case__ ( self ) -> List[str]:
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
def snake_case__ ( self ) -> int:
"""simple docstring"""
__lowercase = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
__lowercase = self.full_loop(scheduler=lowerCamelCase__ )
__lowercase = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.27_91 ) < 1E-3
__lowercase = DEISMultistepScheduler.from_config(scheduler.config )
__lowercase = DPMSolverMultistepScheduler.from_config(scheduler.config )
__lowercase = UniPCMultistepScheduler.from_config(scheduler.config )
__lowercase = DPMSolverSinglestepScheduler.from_config(scheduler.config )
__lowercase = self.full_loop(scheduler=lowerCamelCase__ )
__lowercase = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.27_91 ) < 1E-3
def snake_case__ ( self ) -> Optional[Any]:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCamelCase__ )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowerCamelCase__ , prediction_type=lowerCamelCase__ , sample_max_value=lowerCamelCase__ , algorithm_type="""dpmsolver++""" , solver_order=lowerCamelCase__ , solver_type=lowerCamelCase__ , )
def snake_case__ ( self ) -> Optional[int]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase__ )
def snake_case__ ( self ) -> str:
"""simple docstring"""
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCamelCase__ , solver_type=lowerCamelCase__ , prediction_type=lowerCamelCase__ , algorithm_type=lowerCamelCase__ , )
__lowercase = self.full_loop(
solver_order=lowerCamelCase__ , solver_type=lowerCamelCase__ , prediction_type=lowerCamelCase__ , algorithm_type=lowerCamelCase__ , )
assert not torch.isnan(lowerCamelCase__ ).any(), "Samples have nan numbers"
def snake_case__ ( self ) -> Any:
"""simple docstring"""
self.check_over_configs(lower_order_final=lowerCamelCase__ )
self.check_over_configs(lower_order_final=lowerCamelCase__ )
def snake_case__ ( self ) -> Any:
"""simple docstring"""
self.check_over_configs(lambda_min_clipped=-float("""inf""" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def snake_case__ ( self ) -> List[Any]:
"""simple docstring"""
self.check_over_configs(variance_type=lowerCamelCase__ )
self.check_over_configs(variance_type="""learned_range""" )
def snake_case__ ( self ) -> Tuple:
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=lowerCamelCase__ , time_step=0 )
def snake_case__ ( self ) -> List[Any]:
"""simple docstring"""
__lowercase = self.full_loop()
__lowercase = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.27_91 ) < 1E-3
def snake_case__ ( self ) -> Any:
"""simple docstring"""
__lowercase = self.full_loop(use_karras_sigmas=lowerCamelCase__ )
__lowercase = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.22_48 ) < 1E-3
def snake_case__ ( self ) -> Any:
"""simple docstring"""
__lowercase = self.full_loop(prediction_type="""v_prediction""" )
__lowercase = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.14_53 ) < 1E-3
def snake_case__ ( self ) -> Tuple:
"""simple docstring"""
__lowercase = self.full_loop(prediction_type="""v_prediction""" , use_karras_sigmas=lowerCamelCase__ )
__lowercase = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.06_49 ) < 1E-3
def snake_case__ ( self ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = self.scheduler_classes[0]
__lowercase = self.get_scheduler_config(thresholding=lowerCamelCase__ , dynamic_thresholding_ratio=0 )
__lowercase = scheduler_class(**lowerCamelCase__ )
__lowercase = 10
__lowercase = self.dummy_model()
__lowercase = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
__lowercase = model(lowerCamelCase__ , lowerCamelCase__ )
__lowercase = scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).prev_sample
        assert sample.dtype == torch.float16
| 163 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class SCREAMING_SNAKE_CASE( unittest.TestCase ):
def snake_case__ ( self ) -> Union[str, Any]:
"""simple docstring"""
__lowercase = tempfile.mkdtemp()
__lowercase = BlipImageProcessor()
__lowercase = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
__lowercase = BlipaProcessor(lowerCamelCase__ , lowerCamelCase__ )
processor.save_pretrained(self.tmpdirname )
def snake_case__ ( self , **lowerCamelCase__ ) -> int:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase__ ).tokenizer
def snake_case__ ( self , **lowerCamelCase__ ) -> Optional[int]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname , **lowerCamelCase__ ).image_processor
def snake_case__ ( self ) -> Union[str, Any]:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def snake_case__ ( self ) -> Optional[Any]:
"""simple docstring"""
        __lowercase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
__lowercase = [Image.fromarray(np.moveaxis(lowerCamelCase__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def snake_case__ ( self ) -> List[str]:
"""simple docstring"""
__lowercase = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowercase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__lowercase = self.get_image_processor(do_normalize=lowerCamelCase__ , padding_value=1.0 )
__lowercase = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCamelCase__ )
def snake_case__ ( self ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = BlipaProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
__lowercase = self.prepare_image_inputs()
__lowercase = image_processor(lowerCamelCase__ , return_tensors="""np""" )
__lowercase = processor(images=lowerCamelCase__ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def snake_case__ ( self ) -> Optional[Any]:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = BlipaProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
__lowercase = """lower newer"""
__lowercase = processor(text=lowerCamelCase__ )
__lowercase = tokenizer(lowerCamelCase__ , return_token_type_ids=lowerCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def snake_case__ ( self ) -> Optional[int]:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = BlipaProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
__lowercase = """lower newer"""
__lowercase = self.prepare_image_inputs()
__lowercase = processor(text=lowerCamelCase__ , images=lowerCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase__ ):
processor()
def snake_case__ ( self ) -> int:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = BlipaProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
__lowercase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowercase = processor.batch_decode(lowerCamelCase__ )
__lowercase = tokenizer.batch_decode(lowerCamelCase__ )
self.assertListEqual(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ) -> Dict:
"""simple docstring"""
__lowercase = self.get_image_processor()
__lowercase = self.get_tokenizer()
__lowercase = BlipaProcessor(tokenizer=lowerCamelCase__ , image_processor=lowerCamelCase__ )
__lowercase = """lower newer"""
__lowercase = self.prepare_image_inputs()
__lowercase = processor(text=lowerCamelCase__ , images=lowerCamelCase__ )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 163 | 1 |
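Many of the scheduler tests above boil down to a config round-trip: save the scheduler config, reload it, and check that both instances produce identical timestep schedules. A condensed version of that round-trip outside the test harness (assumes a recent diffusers release):

import tempfile

import torch
from diffusers import DPMSolverSinglestepScheduler

scheduler = DPMSolverSinglestepScheduler()
with tempfile.TemporaryDirectory() as tmpdir:
    scheduler.save_config(tmpdir)
    reloaded = DPMSolverSinglestepScheduler.from_pretrained(tmpdir)

scheduler.set_timesteps(25)
reloaded.set_timesteps(25)
assert torch.equal(scheduler.timesteps, reloaded.timesteps)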
"""simple docstring"""
import os
import pytest
from attr import dataclass
_snake_case = "us-east-1" # defaults region
@dataclass
class _a :
    framework: str
    role = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
    hyperparameters = {
'task_name': 'mnli',
'per_device_train_batch_size': 16,
'per_device_eval_batch_size': 16,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 500,
'save_steps': 5500,
}
    distributed_hyperparameters = {**hyperparameters, 'max_steps': 1000}
@property
def _UpperCamelCase ( self : Any ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def _UpperCamelCase ( self : Optional[int] ):
return F'{self.framework}-transfromers-test'
@property
def _UpperCamelCase ( self : List[str] ):
return F'./tests/sagemaker/scripts/{self.framework}'
@property
def _UpperCamelCase ( self : Any ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def snake_case ( request )-> None:
    '''simple docstring'''
    request.cls.env = _a(framework=request.cls.framework )
| 510 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCAmelCase = {
'configuration_wav2vec2': ['WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Wav2Vec2Config'],
'feature_extraction_wav2vec2': ['Wav2Vec2FeatureExtractor'],
'processing_wav2vec2': ['Wav2Vec2Processor'],
'tokenization_wav2vec2': ['Wav2Vec2CTCTokenizer', 'Wav2Vec2Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Wav2Vec2ForAudioFrameClassification',
'Wav2Vec2ForCTC',
'Wav2Vec2ForMaskedLM',
'Wav2Vec2ForPreTraining',
'Wav2Vec2ForSequenceClassification',
'Wav2Vec2ForXVector',
'Wav2Vec2Model',
'Wav2Vec2PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWav2Vec2ForCTC',
'TFWav2Vec2Model',
'TFWav2Vec2PreTrainedModel',
'TFWav2Vec2ForSequenceClassification',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'FlaxWav2Vec2ForCTC',
'FlaxWav2Vec2ForPreTraining',
'FlaxWav2Vec2Model',
'FlaxWav2Vec2PreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 585 | 0 |
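The `__init__` above follows the transformers lazy-import layout: `_import_structure` maps submodules to exported names, optional backends are guarded, and `_LazyModule` defers the heavy imports until an attribute is first accessed. The effect from the consumer side, assuming `transformers` is installed:

import importlib
import time

start = time.perf_counter()
transformers = importlib.import_module("transformers")
print(f"top-level import: {time.perf_counter() - start:.2f}s")  # cheap: submodules not loaded yet

# The wav2vec2 modeling code is only materialized when the attribute is first touched.
config_cls = transformers.Wav2Vec2Config
print(config_cls.model_type)  # "wav2vec2"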
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class A_(PretrainedConfig ):
    """simple docstring"""

    model_type = """mobilenet_v1"""

    def __init__( self , num_channels=3 , image_size=224 , depth_multiplier=1.0 , min_depth=8 , hidden_act="relu6" , tf_padding=True , classifier_dropout_prob=0.9_9_9 , initializer_range=0.0_2 , layer_norm_eps=0.0_0_1 , **kwargs , ):
        super().__init__(**kwargs )
        if depth_multiplier <= 0:
            raise ValueError('depth_multiplier must be greater than zero.' )
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class A_(OnnxConfig ):
"""simple docstring"""
a_ : Dict = version.parse("""1.11""" )
@property
def _lowerCAmelCase ( self ):
return OrderedDict([('pixel_values', {0: 'batch'})] )
@property
def _lowerCAmelCase ( self ):
if self.task == "image-classification":
return OrderedDict([('logits', {0: 'batch'})] )
else:
return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] )
@property
def _lowerCAmelCase ( self ):
return 1E-4
| 349 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class A_(unittest.TestCase ):
"""simple docstring"""
def _lowerCAmelCase ( self ):
_lowerCamelCase : Optional[int] = tempfile.mkdtemp()
# fmt: off
_lowerCamelCase : List[str] = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
_lowerCamelCase : Optional[Any] = dict(zip(A , range(len(A ) ) ) )
_lowerCamelCase : Optional[int] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
_lowerCamelCase : Optional[Any] = {'unk_token': '<unk>'}
_lowerCamelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_lowerCamelCase : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(A ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(A ) )
_lowerCamelCase : Any = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
'image_std': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
_lowerCamelCase : List[Any] = os.path.join(self.tmpdirname , A )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(A , A )
def _lowerCAmelCase ( self , **A ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **A )
def _lowerCAmelCase ( self , **A ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A )
def _lowerCAmelCase ( self , **A ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname , **A )
def _lowerCAmelCase ( self ):
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self ):
        _lowerCamelCase : Optional[int] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
_lowerCamelCase : List[Any] = [Image.fromarray(np.moveaxis(A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCAmelCase ( self ):
_lowerCamelCase : Optional[Any] = self.get_tokenizer()
_lowerCamelCase : Optional[int] = self.get_rust_tokenizer()
_lowerCamelCase : List[str] = self.get_image_processor()
_lowerCamelCase : str = CLIPProcessor(tokenizer=A , image_processor=A )
processor_slow.save_pretrained(self.tmpdirname )
_lowerCamelCase : Union[str, Any] = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=A )
_lowerCamelCase : Dict = CLIPProcessor(tokenizer=A , image_processor=A )
processor_fast.save_pretrained(self.tmpdirname )
_lowerCamelCase : Dict = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A )
self.assertIsInstance(processor_fast.tokenizer , A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A )
self.assertIsInstance(processor_fast.image_processor , A )
def _lowerCAmelCase ( self ):
_lowerCamelCase : str = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_lowerCamelCase : int = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_lowerCamelCase : str = self.get_image_processor(do_normalize=A , padding_value=1.0 )
_lowerCamelCase : Dict = CLIPProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A )
def _lowerCAmelCase ( self ):
_lowerCamelCase : List[str] = self.get_image_processor()
_lowerCamelCase : str = self.get_tokenizer()
_lowerCamelCase : Any = CLIPProcessor(tokenizer=A , image_processor=A )
_lowerCamelCase : str = self.prepare_image_inputs()
_lowerCamelCase : List[Any] = image_processor(A , return_tensors='np' )
_lowerCamelCase : int = processor(images=A , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _lowerCAmelCase ( self ):
_lowerCamelCase : Dict = self.get_image_processor()
_lowerCamelCase : List[Any] = self.get_tokenizer()
_lowerCamelCase : Dict = CLIPProcessor(tokenizer=A , image_processor=A )
_lowerCamelCase : Optional[Any] = 'lower newer'
_lowerCamelCase : Union[str, Any] = processor(text=A )
_lowerCamelCase : Tuple = tokenizer(A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowerCAmelCase ( self ):
_lowerCamelCase : str = self.get_image_processor()
_lowerCamelCase : List[str] = self.get_tokenizer()
_lowerCamelCase : List[Any] = CLIPProcessor(tokenizer=A , image_processor=A )
_lowerCamelCase : Any = 'lower newer'
_lowerCamelCase : Any = self.prepare_image_inputs()
_lowerCamelCase : Optional[int] = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(A ):
processor()
def _lowerCAmelCase ( self ):
_lowerCamelCase : Optional[int] = self.get_image_processor()
_lowerCamelCase : List[Any] = self.get_tokenizer()
_lowerCamelCase : Optional[int] = CLIPProcessor(tokenizer=A , image_processor=A )
_lowerCamelCase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCamelCase : int = processor.batch_decode(A )
_lowerCamelCase : Dict = tokenizer.batch_decode(A )
self.assertListEqual(A , A )
def _lowerCAmelCase ( self ):
_lowerCamelCase : Any = self.get_image_processor()
_lowerCamelCase : int = self.get_tokenizer()
_lowerCamelCase : int = CLIPProcessor(tokenizer=A , image_processor=A )
_lowerCamelCase : str = 'lower newer'
_lowerCamelCase : List[str] = self.prepare_image_inputs()
_lowerCamelCase : Optional[int] = processor(text=A , images=A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 349 | 1 |
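The MobileNetV1 configuration above validates `depth_multiplier` at construction time and otherwise just records hyperparameters. A small illustration of the config round-trip and the guard, written against the released `MobileNetV1Config` name rather than the obfuscated alias used above:

from transformers import MobileNetV1Config

config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)
restored = MobileNetV1Config.from_dict(config.to_dict())
assert restored.depth_multiplier == 0.75 and restored.image_size == 192

# Non-positive depth multipliers are rejected in __init__.
try:
    MobileNetV1Config(depth_multiplier=0.0)
except ValueError as err:
    print(err)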
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase :Dict = {
'configuration_xlm_roberta_xl': [
'XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaXLConfig',
'XLMRobertaXLOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase :List[Any] = [
'XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaXLForCausalLM',
'XLMRobertaXLForMaskedLM',
'XLMRobertaXLForMultipleChoice',
'XLMRobertaXLForQuestionAnswering',
'XLMRobertaXLForSequenceClassification',
'XLMRobertaXLForTokenClassification',
'XLMRobertaXLModel',
'XLMRobertaXLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 222 |
"""simple docstring"""
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def snake_case ( UpperCamelCase__ : str ) -> Any:
lowerCamelCase : Dict = model.config
lowerCamelCase : Dict = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
lowerCamelCase : Dict = MBartConfig(
is_decoder=UpperCamelCase__ , is_encoder_decoder=UpperCamelCase__ , add_cross_attention=UpperCamelCase__ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=UpperCamelCase__ , add_final_layer_norm=UpperCamelCase__ , )
return encoder_config, decoder_config
def snake_case ( UpperCamelCase__ : str ) -> List[str]:
if "encoder.model" in name:
lowerCamelCase : Optional[Any] = name.replace("""encoder.model""" , """encoder""" )
if "decoder.model" in name:
lowerCamelCase : Tuple = name.replace("""decoder.model""" , """decoder""" )
if "patch_embed.proj" in name:
lowerCamelCase : Optional[int] = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
lowerCamelCase : Dict = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if name.startswith("""encoder""" ):
if "layers" in name:
lowerCamelCase : str = """encoder.""" + name
if "attn.proj" in name:
lowerCamelCase : str = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "mask" not in name:
lowerCamelCase : List[str] = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowerCamelCase : str = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowerCamelCase : List[str] = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowerCamelCase : List[Any] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowerCamelCase : List[str] = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "encoder.norm.weight":
lowerCamelCase : Optional[int] = """encoder.layernorm.weight"""
if name == "encoder.norm.bias":
lowerCamelCase : List[Any] = """encoder.layernorm.bias"""
return name
def snake_case ( UpperCamelCase__ : Dict , UpperCamelCase__ : Union[str, Any] ) -> Optional[Any]:
for key in orig_state_dict.copy().keys():
lowerCamelCase : List[Any] = orig_state_dict.pop(UpperCamelCase__ )
if "qkv" in key:
lowerCamelCase : Tuple = key.split(""".""" )
lowerCamelCase : Dict = int(key_split[3] )
lowerCamelCase : List[Any] = int(key_split[5] )
lowerCamelCase : Dict = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowerCamelCase : List[Any] = val[:dim, :]
lowerCamelCase : Any = val[dim : dim * 2, :]
lowerCamelCase : Optional[Any] = val[-dim:, :]
else:
lowerCamelCase : str = val[:dim]
lowerCamelCase : List[str] = val[dim : dim * 2]
lowerCamelCase : str = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
lowerCamelCase : Dict = val
return orig_state_dict
def snake_case ( UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[Any]=False ) -> Optional[int]:
# load original model
lowerCamelCase : Tuple = DonutModel.from_pretrained(UpperCamelCase__ ).eval()
# load HuggingFace model
lowerCamelCase , lowerCamelCase : List[Any] = get_configs(UpperCamelCase__ )
lowerCamelCase : str = DonutSwinModel(UpperCamelCase__ )
lowerCamelCase : Optional[int] = MBartForCausalLM(UpperCamelCase__ )
lowerCamelCase : int = VisionEncoderDecoderModel(encoder=UpperCamelCase__ , decoder=UpperCamelCase__ )
model.eval()
lowerCamelCase : Any = original_model.state_dict()
lowerCamelCase : str = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
# verify results on scanned document
lowerCamelCase : Optional[int] = load_dataset("""hf-internal-testing/example-documents""" )
lowerCamelCase : str = dataset["""test"""][0]["""image"""].convert("""RGB""" )
lowerCamelCase : List[Any] = XLMRobertaTokenizerFast.from_pretrained(UpperCamelCase__ , from_slow=UpperCamelCase__ )
lowerCamelCase : Union[str, Any] = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
lowerCamelCase : Dict = DonutProcessor(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase : int = processor(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
lowerCamelCase : List[str] = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
lowerCamelCase : Union[str, Any] = """When is the coffee break?"""
lowerCamelCase : int = task_prompt.replace("""{user_input}""" , UpperCamelCase__ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
lowerCamelCase : Dict = """<s_rvlcdip>"""
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
lowerCamelCase : Tuple = """<s_cord>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
lowerCamelCase : Optional[int] = """s_cord-v2>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
lowerCamelCase : Optional[int] = """<s_zhtrainticket>"""
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
lowerCamelCase : Tuple = """hello world"""
else:
raise ValueError("""Model name not supported""" )
lowerCamelCase : Any = original_model.decoder.tokenizer(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , return_tensors="""pt""" )[
"""input_ids"""
]
lowerCamelCase : str = original_model.encoder.model.patch_embed(UpperCamelCase__ )
lowerCamelCase , lowerCamelCase : Optional[Any] = model.encoder.embeddings(UpperCamelCase__ )
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-3 )
# verify encoder hidden states
lowerCamelCase : Union[str, Any] = original_model.encoder(UpperCamelCase__ )
lowerCamelCase : Tuple = model.encoder(UpperCamelCase__ ).last_hidden_state
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-2 )
# verify decoder hidden states
lowerCamelCase : Any = original_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).logits
lowerCamelCase : Tuple = model(UpperCamelCase__ , decoder_input_ids=UpperCamelCase__ ).logits
assert torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F'Saving model and processor to {pytorch_dump_folder_path}' )
model.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
model.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
processor.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
if __name__ == "__main__":
__lowerCamelCase :Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='naver-clova-ix/donut-base-finetuned-docvqa',
required=False,
type=str,
help='Name of the original model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
required=False,
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub.',
)
__lowerCamelCase :Tuple = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 222 | 1 |
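The Donut conversion script above splits fused `qkv` projection weights from the original checkpoint into separate query/key/value tensors by slicing along the output dimension. The slicing logic in isolation, with arbitrary dimensions:

import torch

dim = 4                                   # per-projection hidden size used for the split
qkv_weight = torch.randn(3 * dim, dim)    # fused projection as stored in the source checkpoint

query = qkv_weight[:dim, :]
key = qkv_weight[dim : dim * 2, :]
value = qkv_weight[-dim:, :]

# Concatenating the three slices recovers the original fused matrix.
assert torch.equal(torch.cat([query, key, value], dim=0), qkv_weight)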
class SubArray:
    '''simple docstring'''

    def __init__( self , arr : str ) -> None:
        """simple docstring"""
        # split the comma-separated input string into a list of number strings
        self.array = arr.split("," )

    def solve_sub_array( self ) -> int:
        """simple docstring"""
        rear = [int(self.array[0] )] * len(self.array )
        sum_value = [int(self.array[0] )] * len(self.array )
        for i in range(1 , len(self.array ) ):
            sum_value[i] = max(
                int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
            rear[i] = max(sum_value[i] , rear[i - 1] )
        return rear[len(self.array ) - 1]


if __name__ == "__main__":
    whole_array = input('please input some numbers:')
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(('the results is:', re))
| 717 |
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)


def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name='chinese_remainder_theorem', verbose=True)
    testmod(name='chinese_remainder_theorem2', verbose=True)
    testmod(name='invert_modulo', verbose=True)
    testmod(name='extended_euclid', verbose=True)
| 193 | 0 |
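A quick numeric check of the two CRT implementations above on the system x ≡ 1 (mod 5), x ≡ 3 (mod 7), whose smallest non-negative solution is 31 (assumes the functions above are in scope):

assert chinese_remainder_theorem(5, 1, 7, 3) == 31
assert chinese_remainder_theorem2(5, 1, 7, 3) == 31
assert invert_modulo(5, 7) == 3  # 5 * 3 = 15 ≡ 1 (mod 7)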
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class UpperCAmelCase_ ( lowerCamelCase__ ):
'''simple docstring'''
UpperCamelCase__ : Dict = ['''image_processor''', '''tokenizer''']
UpperCamelCase__ : Union[str, Any] = '''AutoImageProcessor'''
UpperCamelCase__ : Optional[Any] = '''AutoTokenizer'''
def __init__( self , _A=None , _A=None , **_A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _A , )
__SCREAMING_SNAKE_CASE = kwargs.pop('feature_extractor' )
__SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_A , _A )
__SCREAMING_SNAKE_CASE = self.image_processor
__SCREAMING_SNAKE_CASE = False
def __call__( self , *_A , **_A ):
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*_A , **_A )
__SCREAMING_SNAKE_CASE = kwargs.pop('images' , _A )
__SCREAMING_SNAKE_CASE = kwargs.pop('text' , _A )
if len(_A ) > 0:
__SCREAMING_SNAKE_CASE = args[0]
__SCREAMING_SNAKE_CASE = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
__SCREAMING_SNAKE_CASE = self.image_processor(_A , *_A , **_A )
if text is not None:
__SCREAMING_SNAKE_CASE = self.tokenizer(_A , **_A )
if text is None:
return inputs
elif images is None:
return encodings
else:
__SCREAMING_SNAKE_CASE = encodings['input_ids']
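            # when both images and text are given, the tokenized text ids are attached to the image inputs (used as decoder labels)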
return inputs
def _A ( self , *_A , **_A ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_A , **_A )
def _A ( self , *_A , **_A ):
'''simple docstring'''
return self.tokenizer.decode(*_A , **_A )
@contextmanager
def _A ( self ):
'''simple docstring'''
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.' )
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = self.tokenizer
yield
__SCREAMING_SNAKE_CASE = self.image_processor
__SCREAMING_SNAKE_CASE = False
def _A ( self , _A , _A=False , _A=None ):
'''simple docstring'''
if added_vocab is None:
__SCREAMING_SNAKE_CASE = self.tokenizer.get_added_vocab()
__SCREAMING_SNAKE_CASE = {}
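        # scan the generated token string for <s_key> ... </s_key> spans and convert them into a (possibly nested) dict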
while tokens:
__SCREAMING_SNAKE_CASE = re.search(r'<s_(.*?)>' , _A , re.IGNORECASE )
if start_token is None:
break
__SCREAMING_SNAKE_CASE = start_token.group(1 )
__SCREAMING_SNAKE_CASE = re.search(rf"""</s_{key}>""" , _A , re.IGNORECASE )
__SCREAMING_SNAKE_CASE = start_token.group()
if end_token is None:
__SCREAMING_SNAKE_CASE = tokens.replace(_A , '' )
else:
__SCREAMING_SNAKE_CASE = end_token.group()
__SCREAMING_SNAKE_CASE = re.escape(_A )
__SCREAMING_SNAKE_CASE = re.escape(_A )
__SCREAMING_SNAKE_CASE = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" , _A , re.IGNORECASE )
if content is not None:
__SCREAMING_SNAKE_CASE = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
__SCREAMING_SNAKE_CASE = self.tokenajson(_A , is_inner_value=_A , added_vocab=_A )
if value:
if len(_A ) == 1:
__SCREAMING_SNAKE_CASE = value[0]
__SCREAMING_SNAKE_CASE = value
else: # leaf nodes
__SCREAMING_SNAKE_CASE = []
for leaf in content.split(r'<sep/>' ):
__SCREAMING_SNAKE_CASE = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
__SCREAMING_SNAKE_CASE = leaf[1:-2] # for categorical special tokens
output[key].append(_A )
if len(output[key] ) == 1:
__SCREAMING_SNAKE_CASE = output[key][0]
__SCREAMING_SNAKE_CASE = tokens[tokens.find(_A ) + len(_A ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=_A , added_vocab=_A )
if len(_A ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def _A ( self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _A , )
return self.image_processor_class
@property
def _A ( self ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _A , )
return self.image_processor
| 148 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import fairseq
import torch
from packaging import version
from torch import nn
from transformers import (
BartConfig,
BartForConditionalGeneration,
BartForSequenceClassification,
BartModel,
BartTokenizer,
)
from transformers.utils import logging
SCREAMING_SNAKE_CASE_ = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"]
SCREAMING_SNAKE_CASE_ = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification}
if version.parse(fairseq.__version__) < version.parse("0.9.0"):
raise Exception("requires fairseq >= 0.9.0")
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = " Hello world! cécé herlolip"
SCREAMING_SNAKE_CASE_ = [
("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"),
("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"),
("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"),
("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"),
]
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ ):
__a : Dict = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'_float_tensor',
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__a : Dict = dct.pop(SCREAMING_SNAKE_CASE__ )
__a : Dict = val
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ ):
__a : Dict = torch.load(SCREAMING_SNAKE_CASE__ , map_location='cpu' )
__a : Dict = torch.hub.load('pytorch/fairseq' , 'bart.large.cnn' ).eval()
hub_interface.model.load_state_dict(sd['model'] )
return hub_interface
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ ):
__a , __a : Dict = emb.weight.shape
__a : Optional[Any] = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__ )
__a : List[Any] = emb.weight.data
return lin_layer
@torch.no_grad()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
if not os.path.exists(SCREAMING_SNAKE_CASE__ ):
__a : Tuple = torch.hub.load('pytorch/fairseq' , SCREAMING_SNAKE_CASE__ ).eval()
else:
__a : Optional[int] = load_xsum_checkpoint(SCREAMING_SNAKE_CASE__ )
bart.model.upgrade_state_dict(bart.model.state_dict() )
if hf_checkpoint_name is None:
__a : List[str] = checkpoint_path.replace('.' , '-' )
__a : Optional[Any] = BartConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
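    # sanity check: the fairseq and Hugging Face tokenizers must produce identical ids for the sample text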
__a : Union[str, Any] = bart.encode(SCREAMING_SNAKE_CASE__ ).unsqueeze(0 )
__a : List[str] = BartTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ ).encode(SCREAMING_SNAKE_CASE__ , return_tensors='pt' ).unsqueeze(0 )
if not torch.eq(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).all():
raise ValueError(
f'''converted tokenizer and pretrained tokenizer returned different output: {tokens} != {tokensa}''' )
if checkpoint_path == "bart.large.mnli":
__a : List[Any] = bart.state_dict()
remove_ignore_keys_(SCREAMING_SNAKE_CASE__ )
__a : str = state_dict['model.decoder.embed_tokens.weight']
for src, dest in mnli_rename_keys:
rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__a : Dict = BartForSequenceClassification(SCREAMING_SNAKE_CASE__ ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
__a : Any = bart.predict('mnli' , SCREAMING_SNAKE_CASE__ , return_logits=SCREAMING_SNAKE_CASE__ )
__a : Optional[Any] = model(SCREAMING_SNAKE_CASE__ )[0] # logits
else: # no classification heads to worry about
__a : Dict = bart.model.state_dict()
remove_ignore_keys_(SCREAMING_SNAKE_CASE__ )
__a : Optional[Any] = state_dict['decoder.embed_tokens.weight']
__a : List[Any] = bart.extract_features(SCREAMING_SNAKE_CASE__ )
if hf_checkpoint_name == "facebook/bart-large":
__a : Dict = BartModel(SCREAMING_SNAKE_CASE__ ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
__a : str = model(SCREAMING_SNAKE_CASE__ ).model[0]
else:
__a : Optional[Any] = BartForConditionalGeneration(SCREAMING_SNAKE_CASE__ ).eval() # an existing summarization ckpt
model.model.load_state_dict(SCREAMING_SNAKE_CASE__ )
if hasattr(SCREAMING_SNAKE_CASE__ , 'lm_head' ):
__a : Optional[int] = make_linear_from_emb(model.model.shared )
__a : List[Any] = model.model(SCREAMING_SNAKE_CASE__ )[0]
# Check results
if fairseq_output.shape != new_model_outputs.shape:
raise ValueError(
f'''`fairseq_output` shape and `new_model_output` shape are different: {fairseq_output.shape=}, {new_model_outputs.shape}''' )
if (fairseq_output != new_model_outputs).any().item():
raise ValueError('Some values in `fairseq_output` are different from `new_model_outputs`' )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum"
)
SCREAMING_SNAKE_CASE_ = parser.parse_args()
convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
| 597 | 0 |
import inspect
import unittest
from transformers import DecisionTransformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import DecisionTransformerModel
from transformers.models.decision_transformer.modeling_decision_transformer import (
DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
class A :
'''simple docstring'''
def __init__( self : Dict , _UpperCamelCase : Dict , _UpperCamelCase : Tuple=13 , _UpperCamelCase : Dict=7 , _UpperCamelCase : List[Any]=6 , _UpperCamelCase : Any=17 , _UpperCamelCase : List[str]=23 , _UpperCamelCase : Optional[Any]=11 , _UpperCamelCase : List[Any]=True , ):
_lowercase: Any = parent
_lowercase: Dict = batch_size
_lowercase: str = seq_length
_lowercase: List[str] = act_dim
_lowercase: List[str] = state_dim
_lowercase: Any = hidden_size
_lowercase: Dict = max_length
_lowercase: Tuple = is_training
def UpperCAmelCase__ ( self : List[Any]):
_lowercase: str = floats_tensor((self.batch_size, self.seq_length, self.state_dim))
_lowercase: Dict = floats_tensor((self.batch_size, self.seq_length, self.act_dim))
_lowercase: int = floats_tensor((self.batch_size, self.seq_length, 1))
_lowercase: List[Any] = floats_tensor((self.batch_size, self.seq_length, 1))
_lowercase: Optional[Any] = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_000)
_lowercase: int = random_attention_mask((self.batch_size, self.seq_length))
_lowercase: Optional[int] = self.get_config()
return (
config,
states,
actions,
rewards,
returns_to_go,
timesteps,
attention_mask,
)
def UpperCAmelCase__ ( self : int):
return DecisionTransformerConfig(
batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , )
def UpperCAmelCase__ ( self : Optional[int] , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Any , ):
_lowercase: Optional[int] = DecisionTransformerModel(config=_UpperCamelCase)
model.to(_UpperCamelCase)
model.eval()
_lowercase: Dict = model(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
self.parent.assertEqual(result.state_preds.shape , states.shape)
self.parent.assertEqual(result.action_preds.shape , actions.shape)
self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape)
self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size)) # seq length *3 as there are 3 modalities: states, returns and actions
def UpperCAmelCase__ ( self : List[str]):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, states, actions, rewards, returns_to_go, timesteps, attention_mask) = config_and_inputs
        inputs_dict = {
"states": states,
"actions": actions,
"rewards": rewards,
"returns_to_go": returns_to_go,
"timesteps": timesteps,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
class A ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase : str = (DecisionTransformerModel,) if is_torch_available() else ()
lowerCamelCase : List[str] = ()
lowerCamelCase : Any = {"""feature-extraction""": DecisionTransformerModel} if is_torch_available() else {}
# Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids
lowerCamelCase : int = False
# Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features
lowerCamelCase : List[str] = False
lowerCamelCase : Any = False
lowerCamelCase : List[str] = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : Optional[int] = False
lowerCamelCase : Optional[Any] = False
lowerCamelCase : Dict = False
lowerCamelCase : int = False
def UpperCAmelCase__ ( self : Union[str, Any]):
_lowercase: int = DecisionTransformerModelTester(self)
_lowercase: Tuple = ConfigTester(self , config_class=_UpperCamelCase , hidden_size=37)
def UpperCAmelCase__ ( self : List[Any]):
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Optional[int]):
_lowercase: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCamelCase)
@slow
def UpperCAmelCase__ ( self : int):
for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowercase: int = DecisionTransformerModel.from_pretrained(_UpperCamelCase)
self.assertIsNotNone(_UpperCamelCase)
def UpperCAmelCase__ ( self : Any):
_lowercase: List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowercase: str = model_class(_UpperCamelCase)
_lowercase: Tuple = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowercase: Tuple = [*signature.parameters.keys()]
_lowercase: List[Any] = [
"states",
"actions",
"rewards",
"returns_to_go",
"timesteps",
"attention_mask",
]
self.assertListEqual(arg_names[: len(_UpperCamelCase)] , _UpperCamelCase)
@require_torch
class A ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase__ ( self : Optional[int]):
_lowercase: Any = 2 # number of steps of autoregressive prediction we will perform
_lowercase: Any = 10 # defined by the RL environment, may be normalized
_lowercase: Tuple = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert")
_lowercase: Optional[int] = model.to(_UpperCamelCase)
_lowercase: List[Any] = model.config
torch.manual_seed(0)
_lowercase: Any = torch.randn(1 , 1 , config.state_dim).to(device=_UpperCamelCase , dtype=torch.floataa) # env.reset()
_lowercase: Any = torch.tensor(
[[0.2_4_2_7_9_3, -0.2_8_6_9_3_0_7_4, 0.8_7_4_2_6_1_3], [0.6_7_8_1_5_2_7_4, -0.0_8_1_0_1_0_8_5, -0.1_2_9_5_2_1_4_7]] , device=_UpperCamelCase)
_lowercase: Union[str, Any] = torch.tensor(_UpperCamelCase , device=_UpperCamelCase , dtype=torch.floataa).reshape(1 , 1 , 1)
_lowercase: Dict = state
_lowercase: Any = torch.zeros(1 , 0 , config.act_dim , device=_UpperCamelCase , dtype=torch.floataa)
_lowercase: int = torch.zeros(1 , 0 , device=_UpperCamelCase , dtype=torch.floataa)
_lowercase: Optional[int] = torch.tensor(0 , device=_UpperCamelCase , dtype=torch.long).reshape(1 , 1)
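        # autoregressive rollout: predict an action at each step, then append the mocked environment transition and repeat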
for step in range(_UpperCamelCase):
_lowercase: List[str] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=_UpperCamelCase)] , dim=1)
_lowercase: Any = torch.cat([rewards, torch.zeros(1 , 1 , device=_UpperCamelCase)] , dim=1)
_lowercase: Optional[Any] = torch.ones(1 , states.shape[1]).to(dtype=torch.long , device=states.device)
with torch.no_grad():
_lowercase: Optional[Any] = model(
states=_UpperCamelCase , actions=_UpperCamelCase , rewards=_UpperCamelCase , returns_to_go=_UpperCamelCase , timesteps=_UpperCamelCase , attention_mask=_UpperCamelCase , return_dict=_UpperCamelCase , )
self.assertEqual(action_pred.shape , actions.shape)
self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4))
_lowercase: Tuple = ( # env.step(action)
torch.randn(1 , 1 , config.state_dim).to(device=_UpperCamelCase , dtype=torch.floataa),
1.0,
False,
{},
)
_lowercase: Union[str, Any] = action_pred[0, -1]
_lowercase: List[str] = torch.cat([states, state] , dim=1)
_lowercase: Optional[int] = returns_to_go[0, -1] - reward
_lowercase: List[str] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1)] , dim=1)
_lowercase: Dict = torch.cat(
[timesteps, torch.ones((1, 1) , device=_UpperCamelCase , dtype=torch.long) * (step + 1)] , dim=1)
| 705 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_SCREAMING_SNAKE_CASE : Optional[Any] = {'configuration_beit': ['BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BeitConfig', 'BeitOnnxConfig']}
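# Optional backends (vision, torch, flax) register their classes in the lazy import structure below.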
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Optional[Any] = ['BeitFeatureExtractor']
_SCREAMING_SNAKE_CASE : Optional[int] = ['BeitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : str = [
'BEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'BeitForImageClassification',
'BeitForMaskedImageModeling',
'BeitForSemanticSegmentation',
'BeitModel',
'BeitPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE : Union[str, Any] = [
'FlaxBeitForImageClassification',
'FlaxBeitForMaskedImageModeling',
'FlaxBeitModel',
'FlaxBeitPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 206 | 0 |
def gray_code(bit_count: int) -> list:
    """Return the Gray code sequence for `bit_count` bits as a list of integers."""
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Recursively build the Gray code sequence as binary strings."""
    # base cases
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 55 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowercase ( lowerCamelCase__ ):
__UpperCAmelCase = ['''image_processor''', '''tokenizer''']
__UpperCAmelCase = '''ViltImageProcessor'''
__UpperCAmelCase = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self , lowercase_=None , lowercase_=None , **lowercase_) -> List[Any]:
__snake_case = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowercase_ , )
__snake_case = kwargs.pop('feature_extractor')
__snake_case = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.')
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.')
super().__init__(lowercase_ , lowercase_)
__snake_case = self.image_processor
def __call__( self , lowercase_ , lowercase_ = None , lowercase_ = True , lowercase_ = False , lowercase_ = None , lowercase_ = None , lowercase_ = 0 , lowercase_ = None , lowercase_ = None , lowercase_ = None , lowercase_ = False , lowercase_ = False , lowercase_ = False , lowercase_ = False , lowercase_ = True , lowercase_ = None , **lowercase_ , ) -> BatchEncoding:
__snake_case = self.tokenizer(
text=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_token_type_ids=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
# add pixel_values + pixel_mask
__snake_case = self.image_processor(lowercase_ , return_tensors=lowercase_)
encoding.update(lowercase_)
return encoding
def _a ( self , *lowercase_ , **lowercase_) -> Optional[Any]:
return self.tokenizer.batch_decode(*lowercase_ , **lowercase_)
def _a ( self , *lowercase_ , **lowercase_) -> Dict:
return self.tokenizer.decode(*lowercase_ , **lowercase_)
@property
def _a ( self) -> Tuple:
__snake_case = self.tokenizer.model_input_names
__snake_case = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
def _a ( self) -> Optional[int]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowercase_ , )
return self.image_processor_class
@property
def _a ( self) -> List[str]:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowercase_ , )
return self.image_processor
| 313 | 0 |
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
__magic_name__ : Tuple = logging.get_logger(__name__)
__magic_name__ : Optional[int] = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
__magic_name__ : int = {
    'artists_file': {
        'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
    },
    'genres_file': {
        'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
    },
    'lyrics_file': {
        'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
    },
}
__magic_name__ : List[str] = {
'jukebox': 512,
}
class A__ ( __lowercase ):
'''simple docstring'''
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_LYRIC_TOKENS_SIZES
snake_case__ = ["""input_ids""", """attention_mask"""]
def __init__( self : int , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[Any]=["v3", "v2", "v2"] , _SCREAMING_SNAKE_CASE : Any=512 , _SCREAMING_SNAKE_CASE : str=5 , _SCREAMING_SNAKE_CASE : Tuple="<|endoftext|>" , **_SCREAMING_SNAKE_CASE : Optional[int] , ):
"""simple docstring"""
UpperCamelCase = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else unk_token
super().__init__(
unk_token=_SCREAMING_SNAKE_CASE , n_genres=_SCREAMING_SNAKE_CASE , version=_SCREAMING_SNAKE_CASE , max_n_lyric_tokens=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
UpperCamelCase = version
UpperCamelCase = max_n_lyric_tokens
UpperCamelCase = n_genres
with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as vocab_handle:
UpperCamelCase = json.load(_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as vocab_handle:
UpperCamelCase = json.load(_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as vocab_handle:
UpperCamelCase = json.load(_SCREAMING_SNAKE_CASE )
UpperCamelCase = r"[^A-Za-z0-9.,:;!?\-'\"()\[\] \t\n]+"
        # In v2 the character vocabulary had 80 entries; v3 dropped the '+' character, leaving n_vocab=79.
if len(self.lyrics_encoder ) == 79:
UpperCamelCase = oov.replace(r'\-\'' , r'\-+\'' )
UpperCamelCase = regex.compile(_SCREAMING_SNAKE_CASE )
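        # build the reverse (id -> token) mappings used when decoding artists, genres and lyrics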
UpperCamelCase = {v: k for k, v in self.artists_encoder.items()}
UpperCamelCase = {v: k for k, v in self.genres_encoder.items()}
UpperCamelCase = {v: k for k, v in self.lyrics_encoder.items()}
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
"""simple docstring"""
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
"""simple docstring"""
return dict(self.artists_encoder , self.genres_encoder , self.lyrics_encoder )
def _SCREAMING_SNAKE_CASE ( self : Dict , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
UpperCamelCase = [self.artists_encoder.get(_SCREAMING_SNAKE_CASE , 0 ) for artist in list_artists]
for genres in range(len(_SCREAMING_SNAKE_CASE ) ):
UpperCamelCase = [self.genres_encoder.get(_SCREAMING_SNAKE_CASE , 0 ) for genre in list_genres[genres]]
UpperCamelCase = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
UpperCamelCase = [[self.lyrics_encoder.get(_SCREAMING_SNAKE_CASE , 0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] ):
"""simple docstring"""
return list(_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Tuple , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Dict , **_SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
UpperCamelCase = self.prepare_for_tokenization(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = self._tokenize(_SCREAMING_SNAKE_CASE )
return artist, genre, lyrics
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : bool = False ):
"""simple docstring"""
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
UpperCamelCase = artists[idx].lower()
UpperCamelCase = [genres[idx].lower()]
else:
UpperCamelCase = self._normalize(artists[idx] ) + ".v2"
UpperCamelCase = [
self._normalize(_SCREAMING_SNAKE_CASE ) + ".v2" for genre in genres[idx].split('_' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
UpperCamelCase = regex.compile(r'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
UpperCamelCase = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+'\"()[] \t\n"
UpperCamelCase = {vocab[index]: index + 1 for index in range(len(_SCREAMING_SNAKE_CASE ) )}
UpperCamelCase = 0
UpperCamelCase = len(_SCREAMING_SNAKE_CASE ) + 1
UpperCamelCase = self.vocab
UpperCamelCase = {v: k for k, v in self.vocab.items()}
UpperCamelCase = ""
else:
UpperCamelCase = regex.compile(r'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
UpperCamelCase = self._run_strip_accents(_SCREAMING_SNAKE_CASE )
UpperCamelCase = lyrics.replace('\\' , '\n' )
UpperCamelCase = self.out_of_vocab.sub('' , _SCREAMING_SNAKE_CASE ), [], []
return artists, genres, lyrics
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
UpperCamelCase = unicodedata.normalize('NFD' , _SCREAMING_SNAKE_CASE )
UpperCamelCase = []
for char in text:
UpperCamelCase = unicodedata.category(_SCREAMING_SNAKE_CASE )
if cat == "Mn":
continue
output.append(_SCREAMING_SNAKE_CASE )
return "".join(_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Any , _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
UpperCamelCase = (
[chr(_SCREAMING_SNAKE_CASE ) for i in range(ord('a' ) , ord('z' ) + 1 )]
+ [chr(_SCREAMING_SNAKE_CASE ) for i in range(ord('A' ) , ord('Z' ) + 1 )]
+ [chr(_SCREAMING_SNAKE_CASE ) for i in range(ord('0' ) , ord('9' ) + 1 )]
+ ["."]
)
UpperCamelCase = frozenset(_SCREAMING_SNAKE_CASE )
UpperCamelCase = re.compile(r'_+' )
UpperCamelCase = "".join([c if c in accepted else '_' for c in text.lower()] )
UpperCamelCase = pattern.sub('_' , _SCREAMING_SNAKE_CASE ).strip('_' )
return text
def _SCREAMING_SNAKE_CASE ( self : Dict , _SCREAMING_SNAKE_CASE : List[str] ):
"""simple docstring"""
return " ".join(_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[Union[str, TensorType]] = None , _SCREAMING_SNAKE_CASE : bool = False ):
"""simple docstring"""
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCamelCase = TensorType(_SCREAMING_SNAKE_CASE )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
UpperCamelCase = tf.constant
UpperCamelCase = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
UpperCamelCase = torch.tensor
UpperCamelCase = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
UpperCamelCase = jnp.array
UpperCamelCase = _is_jax
else:
UpperCamelCase = np.asarray
UpperCamelCase = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
UpperCamelCase = [inputs]
if not is_tensor(_SCREAMING_SNAKE_CASE ):
UpperCamelCase = as_tensor(_SCREAMING_SNAKE_CASE )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self : str , _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str="" , _SCREAMING_SNAKE_CASE : Optional[Any]="pt" ):
"""simple docstring"""
UpperCamelCase = [0, 0, 0]
UpperCamelCase = [artist] * len(self.version )
UpperCamelCase = [genres] * len(self.version )
UpperCamelCase = self.tokenize(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = self._convert_token_to_id(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
UpperCamelCase = [-INFINITY] * len(full_tokens[-1] )
UpperCamelCase = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] , tensor_type=_SCREAMING_SNAKE_CASE )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def _SCREAMING_SNAKE_CASE ( self : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[str] = None ):
"""simple docstring"""
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
UpperCamelCase = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder , ensure_ascii=_SCREAMING_SNAKE_CASE ) )
UpperCamelCase = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder , ensure_ascii=_SCREAMING_SNAKE_CASE ) )
UpperCamelCase = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder , ensure_ascii=_SCREAMING_SNAKE_CASE ) )
return (artists_file, genres_file, lyrics_file)
def _SCREAMING_SNAKE_CASE ( self : Tuple , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
UpperCamelCase = self.artists_decoder.get(_SCREAMING_SNAKE_CASE )
UpperCamelCase = [self.genres_decoder.get(_SCREAMING_SNAKE_CASE ) for genre in genres_index]
UpperCamelCase = [self.lyrics_decoder.get(_SCREAMING_SNAKE_CASE ) for character in lyric_index]
return artist, genres, lyrics
| 706 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ : int = logging.get_logger(__name__)
__magic_name__ : Optional[Any] = {
    '''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
    '''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
    '''uclanlp/visualbert-vqa-coco-pre''': (
        '''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
    ),
    '''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
    '''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
    '''uclanlp/visualbert-vcr-coco-pre''': (
        '''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
    ),
    '''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
    '''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
    '''uclanlp/visualbert-nlvr2-coco-pre''': (
        '''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
    )
    # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class A__ ( __snake_case ):
'''simple docstring'''
snake_case__ = """visual_bert"""
def __init__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : Optional[int]=3_0522 , _SCREAMING_SNAKE_CASE : Dict=768 , _SCREAMING_SNAKE_CASE : Tuple=512 , _SCREAMING_SNAKE_CASE : Optional[Any]=12 , _SCREAMING_SNAKE_CASE : Any=12 , _SCREAMING_SNAKE_CASE : Any=3072 , _SCREAMING_SNAKE_CASE : Dict="gelu" , _SCREAMING_SNAKE_CASE : Union[str, Any]=0.1 , _SCREAMING_SNAKE_CASE : Any=0.1 , _SCREAMING_SNAKE_CASE : Optional[int]=512 , _SCREAMING_SNAKE_CASE : List[Any]=2 , _SCREAMING_SNAKE_CASE : str=0.0_2 , _SCREAMING_SNAKE_CASE : Any=1E-1_2 , _SCREAMING_SNAKE_CASE : Any=False , _SCREAMING_SNAKE_CASE : Optional[int]=True , _SCREAMING_SNAKE_CASE : Dict=1 , _SCREAMING_SNAKE_CASE : Optional[int]=0 , _SCREAMING_SNAKE_CASE : str=2 , **_SCREAMING_SNAKE_CASE : Dict , ):
"""simple docstring"""
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
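        # store the text-encoder and visual-embedding hyper-parameters on the config so they are serialized with it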
UpperCamelCase = vocab_size
UpperCamelCase = max_position_embeddings
UpperCamelCase = hidden_size
UpperCamelCase = visual_embedding_dim
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = initializer_range
UpperCamelCase = type_vocab_size
UpperCamelCase = layer_norm_eps
UpperCamelCase = bypass_transformer
UpperCamelCase = special_visual_initialize
| 410 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class _lowerCAmelCase ( __UpperCAmelCase , unittest.TestCase ):
SCREAMING_SNAKE_CASE_: List[Any] = CTRLTokenizer
SCREAMING_SNAKE_CASE_: Dict = False
SCREAMING_SNAKE_CASE_: Optional[int] = False
def A ( self ) -> int:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
_SCREAMING_SNAKE_CASE : str = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
_SCREAMING_SNAKE_CASE : Dict = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
_SCREAMING_SNAKE_CASE : str = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
_SCREAMING_SNAKE_CASE : Union[str, Any] = {'unk_token': '<unk>'}
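        # write the toy vocabulary and BPE merge rules to temporary files consumed by the tokenizer under test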
_SCREAMING_SNAKE_CASE : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_SCREAMING_SNAKE_CASE : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(lowerCAmelCase_ ) )
def A ( self , **lowerCAmelCase_ ) -> Optional[Any]:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def A ( self , lowerCAmelCase_ ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : int = 'adapt react readapt apt'
_SCREAMING_SNAKE_CASE : Union[str, Any] = 'adapt react readapt apt'
return input_text, output_text
def A ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE : Optional[Any] = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
_SCREAMING_SNAKE_CASE : Any = 'adapt react readapt apt'
_SCREAMING_SNAKE_CASE : Any = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
_SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
_SCREAMING_SNAKE_CASE : int = tokens + [tokenizer.unk_token]
_SCREAMING_SNAKE_CASE : Any = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , lowerCAmelCase_ )
| 621 |
"""simple docstring"""
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase__ = 16
lowerCAmelCase__ = 32
def lowercase__ ( lowerCamelCase, lowerCamelCase = 16 ):
_SCREAMING_SNAKE_CASE : List[str] = AutoTokenizer.from_pretrained('bert-base-cased' )
_SCREAMING_SNAKE_CASE : Any = load_dataset('glue', 'mrpc' )
def tokenize_function(lowerCamelCase ):
# max_length=None => use the model max length (it's actually the default)
_SCREAMING_SNAKE_CASE : int = tokenizer(examples['sentence1'], examples['sentence2'], truncation=lowerCamelCase, max_length=lowerCamelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_SCREAMING_SNAKE_CASE : Tuple = datasets.map(
lowerCamelCase, batched=lowerCamelCase, remove_columns=['idx', 'sentence1', 'sentence2'], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_SCREAMING_SNAKE_CASE : Dict = tokenized_datasets.rename_column('label', 'labels' )
def collate_fn(lowerCamelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_SCREAMING_SNAKE_CASE : List[Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_SCREAMING_SNAKE_CASE : List[str] = 16
elif accelerator.mixed_precision != "no":
_SCREAMING_SNAKE_CASE : Dict = 8
else:
_SCREAMING_SNAKE_CASE : str = None
return tokenizer.pad(
lowerCamelCase, padding='longest', max_length=lowerCamelCase, pad_to_multiple_of=lowerCamelCase, return_tensors='pt', )
# Instantiate dataloaders.
_SCREAMING_SNAKE_CASE : List[str] = DataLoader(
tokenized_datasets['train'], shuffle=lowerCamelCase, collate_fn=lowerCamelCase, batch_size=lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = DataLoader(
tokenized_datasets['validation'], shuffle=lowerCamelCase, collate_fn=lowerCamelCase, batch_size=lowerCamelCase )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
lowerCAmelCase__ = mocked_dataloaders # noqa: F811
def lowercase__ ( lowerCamelCase, lowerCamelCase ):
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', lowerCamelCase ) == "1":
_SCREAMING_SNAKE_CASE : int = 2
# Initialize accelerator
_SCREAMING_SNAKE_CASE : Optional[int] = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_SCREAMING_SNAKE_CASE : Any = config['lr']
_SCREAMING_SNAKE_CASE : Tuple = int(config['num_epochs'] )
_SCREAMING_SNAKE_CASE : Dict = int(config['seed'] )
_SCREAMING_SNAKE_CASE : List[Any] = int(config['batch_size'] )
_SCREAMING_SNAKE_CASE : Tuple = evaluate.load('glue', 'mrpc' )
# New Code #
# We now can define an inner training loop function. It should take a batch size as the only parameter,
# and build the dataloaders in there.
# It also gets our decorator
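    # `find_executable_batch_size` re-runs the decorated function with a halved batch size whenever it hits an out-of-memory error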
@find_executable_batch_size(starting_batch_size=lowerCamelCase )
def inner_training_loop(lowerCamelCase ):
# And now just move everything below under this function
# We need to bring in the Accelerator object from earlier
nonlocal accelerator
# And reset all of its attributes that could hold onto any memory:
accelerator.free_memory()
# Then we can declare the model, optimizer, and everything else:
set_seed(lowerCamelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_SCREAMING_SNAKE_CASE : Tuple = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=lowerCamelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_SCREAMING_SNAKE_CASE : Any = model.to(accelerator.device )
# Instantiate optimizer
_SCREAMING_SNAKE_CASE : str = AdamW(params=model.parameters(), lr=lowerCamelCase )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = get_dataloaders(lowerCamelCase, lowerCamelCase )
# Instantiate scheduler
_SCREAMING_SNAKE_CASE : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=lowerCamelCase, num_warmup_steps=100, num_training_steps=(len(lowerCamelCase ) * num_epochs), )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = accelerator.prepare(
lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase )
# Now we train the model
for epoch in range(lowerCamelCase ):
model.train()
for step, batch in enumerate(lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
_SCREAMING_SNAKE_CASE : Optional[Any] = model(**lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[str] = outputs.loss
accelerator.backward(lowerCamelCase )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCamelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_SCREAMING_SNAKE_CASE : List[str] = model(**lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = outputs.logits.argmax(dim=-1 )
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = accelerator.gather_for_metrics((predictions, batch['labels']) )
metric.add_batch(
predictions=lowerCamelCase, references=lowerCamelCase, )
_SCREAMING_SNAKE_CASE : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""", lowerCamelCase )
# New Code #
# And call it at the end with no arguments
# Note: You could also refactor this outside of your training loop function
inner_training_loop()
def lowercase__ ( ):
_SCREAMING_SNAKE_CASE : str = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument(
        '--mixed_precision', type=lowerCamelCase, default=lowerCamelCase, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        ' between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10'
        ' and an Nvidia Ampere GPU.', )
parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.' )
_SCREAMING_SNAKE_CASE : Any = parser.parse_args()
_SCREAMING_SNAKE_CASE : Optional[Any] = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
training_function(lowerCamelCase, lowerCamelCase )
if __name__ == "__main__":
main()
| 621 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class __snake_case :
"""simple docstring"""
def __init__( self : int , lowerCamelCase : List[Any] , lowerCamelCase : List[Any]=13 , lowerCamelCase : Optional[Any]=64 , lowerCamelCase : List[Any]=2 , lowerCamelCase : Optional[Any]=3 , lowerCamelCase : Tuple=True , lowerCamelCase : Optional[int]=True , lowerCamelCase : Optional[int]=32 , lowerCamelCase : str=5 , lowerCamelCase : str=4 , lowerCamelCase : Optional[Any]=37 , lowerCamelCase : str="gelu" , lowerCamelCase : Any=0.1 , lowerCamelCase : List[str]=0.1 , lowerCamelCase : Union[str, Any]=10 , lowerCamelCase : Any=0.02 , lowerCamelCase : Any=[1, 16, 4, 4] , lowerCamelCase : Dict=None , ) -> List[Any]:
lowerCAmelCase_ : Any = parent
lowerCAmelCase_ : List[str] = batch_size
lowerCAmelCase_ : Optional[Any] = image_size
lowerCAmelCase_ : List[Any] = patch_size
lowerCAmelCase_ : Any = num_channels
lowerCAmelCase_ : Dict = is_training
lowerCAmelCase_ : Any = use_labels
lowerCAmelCase_ : List[str] = hidden_size
lowerCAmelCase_ : Any = num_hidden_layers
lowerCAmelCase_ : Union[str, Any] = num_attention_heads
lowerCAmelCase_ : int = intermediate_size
lowerCAmelCase_ : Union[str, Any] = hidden_act
lowerCAmelCase_ : Any = hidden_dropout_prob
lowerCAmelCase_ : List[str] = attention_probs_dropout_prob
lowerCAmelCase_ : int = type_sequence_label_size
lowerCAmelCase_ : Dict = initializer_range
lowerCAmelCase_ : Union[str, Any] = scope
lowerCAmelCase_ : Optional[Any] = backbone_featmap_shape
# in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
# the number of patches is based on the feature map of the backbone, which by default uses an output stride
# of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
lowerCAmelCase_ : List[str] = (self.image_size // 32) ** 2
lowerCAmelCase_ : str = num_patches + 1
def __lowercase ( self : List[str] ) -> Dict:
lowerCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase_ : List[Any] = None
if self.use_labels:
lowerCAmelCase_ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ : str = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : int ) -> Optional[Any]:
lowerCAmelCase_ : Dict = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [4, 8, 16, 32],
"""num_groups""": 2,
}
return ViTHybridConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=lowerCamelCase , )
def __lowercase ( self : Optional[Any] , lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : Union[str, Any] ) -> Tuple:
lowerCAmelCase_ : Tuple = ViTHybridModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowerCAmelCase_ : Optional[int] = model(lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowercase ( self : str , lowerCamelCase : Optional[Any] , lowerCamelCase : Dict , lowerCamelCase : Dict ) -> Any:
lowerCAmelCase_ : List[Any] = self.type_sequence_label_size
lowerCAmelCase_ : str = ViTHybridForImageClassification(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
lowerCAmelCase_ : Tuple = model(lowerCamelCase , labels=lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowercase ( self : Optional[Any] ) -> Tuple:
lowerCAmelCase_ : int = self.prepare_config_and_inputs()
lowerCAmelCase_ : Dict = config_and_inputs
lowerCAmelCase_ : List[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( _SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,unittest.TestCase):
"""simple docstring"""
lowercase = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
lowercase = (
{'feature-extraction': ViTHybridModel, 'image-classification': ViTHybridForImageClassification}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
def __lowercase ( self : Optional[Any] ) -> str:
lowerCAmelCase_ : List[Any] = ViTHybridModelTester(self )
lowerCAmelCase_ : Dict = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase , hidden_size=37 )
def __lowercase ( self : Optional[Any] ) -> str:
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def __lowercase ( self : Optional[Any] ) -> Tuple:
pass
def __lowercase ( self : str ) -> str:
lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : str = model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase_ : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase , nn.Linear ) )
def __lowercase ( self : Any ) -> List[str]:
lowerCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase_ : Dict = model_class(lowerCamelCase )
lowerCAmelCase_ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase_ : Dict = [*signature.parameters.keys()]
lowerCAmelCase_ : int = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , lowerCamelCase )
def __lowercase ( self : List[Any] ) -> Any:
lowerCAmelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def __lowercase ( self : str ) -> Any:
lowerCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
def __lowercase ( self : str ) -> Dict:
lowerCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase_ : Optional[Any] = _config_zero_init(lowerCamelCase )
for model_class in self.all_model_classes:
lowerCAmelCase_ : Tuple = model_class(config=lowerCamelCase )
# Skip the check for the backbone
for name, module in model.named_modules():
if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
lowerCAmelCase_ : Optional[Any] = [F'{name}.{key}' for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def __lowercase ( self : Optional[int] ) -> Tuple:
for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : Tuple = ViTHybridModel.from_pretrained(lowerCamelCase )
self.assertIsNotNone(lowerCamelCase )
def UpperCamelCase_ ( ):
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase):
"""simple docstring"""
@cached_property
def __lowercase ( self : Any ) -> List[str]:
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowercase ( self : Union[str, Any] ) -> Dict:
lowerCAmelCase_ : List[Any] = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
lowerCamelCase )
lowerCAmelCase_ : Any = self.default_image_processor
lowerCAmelCase_ : List[Any] = prepare_img()
lowerCAmelCase_ : Union[str, Any] = image_processor(images=lowerCamelCase , return_tensors="""pt""" ).to(lowerCamelCase )
# forward pass
with torch.no_grad():
lowerCAmelCase_ : Any = model(**lowerCamelCase )
# verify the logits
lowerCAmelCase_ : Union[str, Any] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , lowerCamelCase )
lowerCAmelCase_ : Dict = torch.tensor([-1.9_090, -0.4_993, -0.2_389] ).to(lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1E-4 ) )
@slow
@require_accelerate
def __lowercase ( self : Optional[Any] ) -> List[str]:
lowerCAmelCase_ : Any = ViTHybridImageProcessor.from_pretrained("""google/vit-hybrid-base-bit-384""" )
lowerCAmelCase_ : List[str] = ViTHybridForImageClassification.from_pretrained("""google/vit-hybrid-base-bit-384""" , device_map="""auto""" )
lowerCAmelCase_ : Any = prepare_img()
lowerCAmelCase_ : List[Any] = image_processor(images=lowerCamelCase , return_tensors="""pt""" )
lowerCAmelCase_ : Any = model(**lowerCamelCase )
lowerCAmelCase_ : str = outputs.logits
# model predicts one of the 1000 ImageNet classes
lowerCAmelCase_ : List[str] = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , """tabby, tabby cat""" )
| 700 |
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.array ):
    '''Element-wise logistic sigmoid 1 / (1 + e^-x) of the input array.'''
    return 1 / (1 + np.exp(-vector ))
def gaussian_error_linear_unit(vector: np.array ):
    '''Sigmoid-based approximation of the GELU activation: x * sigmoid(1.702 * x).'''
    return vector * sigmoid(1.702 * vector )
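# Quick sanity check (illustrative values): sigmoid(np.array([0.0])) gives 0.5, and the
# GELU approximation at 0.0 is 0.0, since 0 * sigmoid(0) == 0.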
if __name__ == "__main__":
import doctest
doctest.testmod()
| 398 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class a__ ( ABC ):
@staticmethod
@abstractmethod
def a_ ( UpperCamelCase_ : ArgumentParser):
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
def a_ ( self : List[Any]):
"""simple docstring"""
raise NotImplementedError()
| 77 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester( unittest.TestCase ):
def __init__( self : Optional[Any] , UpperCamelCase_ : int , UpperCamelCase_ : Any=13 , UpperCamelCase_ : Optional[int]=3 , UpperCamelCase_ : int=224 , UpperCamelCase_ : int=30 , UpperCamelCase_ : str=400 , UpperCamelCase_ : List[Any]=True , UpperCamelCase_ : Optional[Any]=None , UpperCamelCase_ : Optional[int]=True , UpperCamelCase_ : Optional[int]=[0.5, 0.5, 0.5] , UpperCamelCase_ : Optional[Any]=[0.5, 0.5, 0.5] , ):
"""simple docstring"""
__UpperCAmelCase : Tuple = size if size is not None else {"height": 18, "width": 18}
__UpperCAmelCase : List[Any] = parent
__UpperCAmelCase : Tuple = batch_size
__UpperCAmelCase : Tuple = num_channels
__UpperCAmelCase : List[Any] = image_size
__UpperCAmelCase : str = min_resolution
__UpperCAmelCase : Tuple = max_resolution
__UpperCAmelCase : Optional[Any] = do_resize
__UpperCAmelCase : Any = size
__UpperCAmelCase : Any = do_normalize
__UpperCAmelCase : Any = image_mean
__UpperCAmelCase : Optional[Any] = image_std
def a_ ( self : str):
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class a__ ( ImageProcessingSavingTestMixin , unittest.TestCase ):
lowercase_ = ViTImageProcessor if is_vision_available() else None
def a_ ( self : Any):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = EfficientFormerImageProcessorTester(self)
@property
def a_ ( self : Union[str, Any]):
"""simple docstring"""
return self.image_proc_tester.prepare_image_processor_dict()
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(UpperCamelCase_ , "image_mean"))
self.assertTrue(hasattr(UpperCamelCase_ , "image_std"))
self.assertTrue(hasattr(UpperCamelCase_ , "do_normalize"))
self.assertTrue(hasattr(UpperCamelCase_ , "do_resize"))
self.assertTrue(hasattr(UpperCamelCase_ , "size"))
def a_ ( self : Dict):
"""simple docstring"""
pass
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
__UpperCAmelCase : str = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , Image.Image)
# Test not batched input
__UpperCAmelCase : Optional[int] = image_processor(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
__UpperCAmelCase : Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def a_ ( self : Tuple):
"""simple docstring"""
__UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__UpperCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , np.ndarray)
# Test not batched input
__UpperCAmelCase : Tuple = image_processor(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
__UpperCAmelCase : Any = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
def a_ ( self : Any):
"""simple docstring"""
__UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__UpperCAmelCase : Any = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_)
for image in image_inputs:
self.assertIsInstance(UpperCamelCase_ , torch.Tensor)
# Test not batched input
__UpperCAmelCase : Optional[Any] = image_processor(image_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
# Test batched
__UpperCAmelCase : Optional[int] = image_processor(UpperCamelCase_ , return_tensors="pt").pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["height"],
self.image_proc_tester.size["width"],
) , )
| 77 | 1 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ) , "Tatoeba directory does not exist." )
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A__ ( self ) -> Union[str, Any]:
'''simple docstring'''
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir )
@slow
def A__ ( self ) -> Tuple:
'''simple docstring'''
self.resolver.convert_models(["heb-eng"] )
@slow
def A__ ( self ) -> Tuple:
'''simple docstring'''
        model_card, mmeta = self.resolver.write_model_card("opus-mt-he-en" , dry_run=True )
assert mmeta["long_pair"] == "heb-eng"
| 601 |
import string
import numpy
def greatest_common_divisor(a: int , b: int ) -> int:
    '''Euclid's algorithm for the greatest common divisor of two integers.'''
    return b if a == 0 else greatest_common_divisor(b % a , a )
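# Example: greatest_common_divisor(8, 36) == 4, which is why a key determinant of 8
# would be rejected below (it shares a factor with the 36-character alphabet).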
class __lowerCamelCase :
"""simple docstring"""
lowerCAmelCase__ = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
    lowerCAmelCase__ = numpy.vectorize(lambda x : x % 36 )
lowerCAmelCase__ = numpy.vectorize(snake_case_ )
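    # The cipher works over a 36-symbol alphabet (A-Z plus 0-9): the key matrix is reduced
    # modulo 36 and its determinant must be coprime with 36 so that the key is invertible,
    # which check_determinant() below enforces.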
def __init__( self , UpperCAmelCase ) -> None:
'''simple docstring'''
lowercase_ = self.modulus(UpperCAmelCase ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
lowercase_ = encrypt_key.shape[0]
def A__ ( self , UpperCAmelCase ) -> int:
'''simple docstring'''
return self.key_string.index(UpperCAmelCase )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
return self.key_string[round(UpperCAmelCase )]
def A__ ( self ) -> None:
'''simple docstring'''
lowercase_ = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
lowercase_ = det % len(self.key_string )
lowercase_ = len(self.key_string )
if greatest_common_divisor(UpperCAmelCase , len(self.key_string ) ) != 1:
lowercase_ = (
F'determinant modular {req_l} of encryption key({det}) '
F'is not co prime w.r.t {req_l}.\nTry another key.'
)
raise ValueError(UpperCAmelCase )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = [char for char in text.upper() if char in self.key_string]
lowercase_ = chars[-1]
while len(UpperCAmelCase ) % self.break_key != 0:
chars.append(UpperCAmelCase )
return "".join(UpperCAmelCase )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = self.process_text(text.upper() )
lowercase_ = ""
for i in range(0 , len(UpperCAmelCase ) - self.break_key + 1 , self.break_key ):
lowercase_ = text[i : i + self.break_key]
lowercase_ = [self.replace_letters(UpperCAmelCase ) for char in batch]
lowercase_ = numpy.array([vec] ).T
lowercase_ = self.modulus(self.encrypt_key.dot(UpperCAmelCase ) ).T.tolist()[
0
]
lowercase_ = "".join(
self.replace_digits(UpperCAmelCase ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def A__ ( self ) -> numpy.ndarray:
'''simple docstring'''
lowercase_ = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
lowercase_ = det % len(self.key_string )
lowercase_ = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
lowercase_ = i
break
lowercase_ = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(UpperCAmelCase ) )
def A__ ( self , UpperCAmelCase ) -> str:
'''simple docstring'''
lowercase_ = self.make_decrypt_key()
lowercase_ = self.process_text(text.upper() )
lowercase_ = ""
for i in range(0 , len(UpperCAmelCase ) - self.break_key + 1 , self.break_key ):
lowercase_ = text[i : i + self.break_key]
lowercase_ = [self.replace_letters(UpperCAmelCase ) for char in batch]
lowercase_ = numpy.array([vec] ).T
lowercase_ = self.modulus(decrypt_key.dot(UpperCAmelCase ) ).T.tolist()[0]
lowercase_ = "".join(
self.replace_digits(UpperCAmelCase ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def main():
'''simple docstring'''
lowercase_ = int(input("Enter the order of the encryption key: " ) )
lowercase_ = []
print("Enter each row of the encryption key with space separated integers" )
for _ in range(__lowerCamelCase ):
lowercase_ = [int(__lowerCamelCase ) for x in input().split()]
hill_matrix.append(__lowerCamelCase )
lowercase_ = HillCipher(numpy.array(__lowerCamelCase ) )
print("Would you like to encrypt or decrypt some text? (1 or 2)" )
lowercase_ = input("\n1. Encrypt\n2. Decrypt\n" )
if option == "1":
lowercase_ = input("What text would you like to encrypt?: " )
print("Your encrypted text is:" )
print(hc.encrypt(__lowerCamelCase ) )
elif option == "2":
lowercase_ = input("What text would you like to decrypt?: " )
print("Your decrypted text is:" )
print(hc.decrypt(__lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 601 | 1 |
import os
from datetime import datetime as dt
from github import Github
a__ = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''enhancement''',
'''new pipeline/model''',
'''new scheduler''',
'''wip''',
]
def main() -> None:
    """Close, reopen or mark stale issues on the diffusers repo."""
    g = Github(os.environ['''GITHUB_TOKEN'''] )
    repo = g.get_repo('''huggingface/diffusers''' )
    open_issues = repo.get_issues(state='''open''' )
    for issue in open_issues:
        comments = sorted(issue.get_comments() , key=lambda i: i.created_at , reverse=True )
        last_comment = comments[0] if len(comments ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
| 14 |
def binary_recursive(__A : int ) -> str:
    decimal = int(__A )
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal )
    div, mod = divmod(decimal , 2 )
    return binary_recursive(div ) + str(mod )
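# Worked example: binary_recursive(10) -> binary_recursive(5) + "0" -> "101" + "0" -> "1010".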
def main(__A : str ) -> str:
    number = str(__A ).strip()
    if not number:
        raise ValueError('''No input value was provided''' )
    negative = '''-''' if number.startswith('''-''' ) else ''''''
    number = number.lstrip('''-''' )
    if not number.isnumeric():
        raise ValueError('''Input value is not an integer''' )
    return f'''{negative}0b{binary_recursive(int(number ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
| 485 | 0 |
"""simple docstring"""
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = None
__UpperCamelCase = None
__UpperCamelCase = graph
self._normalize_graph(__UpperCAmelCase , __UpperCAmelCase )
__UpperCamelCase = len(__UpperCAmelCase )
__UpperCamelCase = None
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
if sources is int:
__UpperCamelCase = [sources]
if sinks is int:
__UpperCamelCase = [sinks]
if len(__UpperCAmelCase ) == 0 or len(__UpperCAmelCase ) == 0:
return
__UpperCamelCase = sources[0]
__UpperCamelCase = sinks[0]
# make fake vertex if there are more
# than one source or sink
if len(__UpperCAmelCase ) > 1 or len(__UpperCAmelCase ) > 1:
__UpperCamelCase = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
__UpperCamelCase = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
__UpperCamelCase = max_input_flow
__UpperCamelCase = 0
__UpperCamelCase = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
__UpperCamelCase = max_input_flow
__UpperCamelCase = size - 1
def UpperCAmelCase ( self ):
'''simple docstring'''
if self.maximum_flow_algorithm is None:
raise Exception('You need to set maximum flow algorithm before.' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = algorithm(self )
class __lowerCAmelCase :
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = flow_network
__UpperCamelCase = flow_network.verticesCount
__UpperCamelCase = flow_network.sourceIndex
__UpperCamelCase = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms, use deep copy before doing that
__UpperCamelCase = flow_network.graph
__UpperCamelCase = False
def UpperCAmelCase ( self ):
'''simple docstring'''
if not self.executed:
self._algorithm()
__UpperCamelCase = True
def UpperCAmelCase ( self ):
'''simple docstring'''
pass
class __lowerCAmelCase ( A_ ):
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
super().__init__(__UpperCAmelCase )
# use this to save your result
__UpperCamelCase = -1
def UpperCAmelCase ( self ):
'''simple docstring'''
if not self.executed:
raise Exception('You should execute algorithm before using its result!' )
return self.maximum_flow
class __lowerCAmelCase ( A_ ):
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
super().__init__(__UpperCAmelCase )
__UpperCamelCase = [[0] * self.verticies_count for i in range(self.verticies_count )]
__UpperCamelCase = [0] * self.verticies_count
__UpperCamelCase = [0] * self.verticies_count
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = self.verticies_count
# push some substance to graph
for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index] ):
self.preflow[self.source_index][nextvertex_index] += bandwidth
self.preflow[nextvertex_index][self.source_index] -= bandwidth
self.excesses[nextvertex_index] += bandwidth
# Relabel-to-front selection rule
__UpperCamelCase = [
i
for i in range(self.verticies_count )
if i != self.source_index and i != self.sink_index
]
# move through list
__UpperCamelCase = 0
while i < len(__UpperCAmelCase ):
__UpperCamelCase = vertices_list[i]
__UpperCamelCase = self.heights[vertex_index]
self.process_vertex(__UpperCAmelCase )
if self.heights[vertex_index] > previous_height:
# if it was relabeled, swap elements
# and start from 0 index
vertices_list.insert(0 , vertices_list.pop(__UpperCAmelCase ) )
__UpperCamelCase = 0
else:
i += 1
__UpperCamelCase = sum(self.preflow[self.source_index] )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
while self.excesses[vertex_index] > 0:
for neighbour_index in range(self.verticies_count ):
# if it's neighbour and current vertex is higher
if (
self.graph[vertex_index][neighbour_index]
- self.preflow[vertex_index][neighbour_index]
> 0
and self.heights[vertex_index] > self.heights[neighbour_index]
):
self.push(__UpperCAmelCase , __UpperCAmelCase )
self.relabel(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = min(
self.excesses[from_index] , self.graph[from_index][to_index] - self.preflow[from_index][to_index] , )
self.preflow[from_index][to_index] += preflow_delta
self.preflow[to_index][from_index] -= preflow_delta
self.excesses[from_index] -= preflow_delta
self.excesses[to_index] += preflow_delta
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = None
for to_index in range(self.verticies_count ):
if (
self.graph[vertex_index][to_index]
- self.preflow[vertex_index][to_index]
> 0
) and (min_height is None or self.heights[to_index] < min_height):
__UpperCamelCase = self.heights[to_index]
if min_height is not None:
__UpperCamelCase = min_height + 1
if __name__ == "__main__":
UpperCamelCase : Tuple = [0]
UpperCamelCase : Dict = [3]
# graph = [
# [0, 0, 4, 6, 0, 0],
# [0, 0, 5, 2, 0, 0],
# [0, 0, 0, 0, 4, 4],
# [0, 0, 0, 0, 6, 6],
# [0, 0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0, 0],
# ]
UpperCamelCase : List[Any] = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
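    # For this graph the only path from vertex 0 to vertex 3 is 0 -> 1 -> 2 -> 3,
    # so the printed maximum flow should be 6 (the 1 -> 2 edge is the bottleneck).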
# prepare our network
UpperCamelCase : Dict = FlowNetwork(graph, entrances, exits)
# set algorithm
flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
# and calculate
UpperCamelCase : str = flow_network.find_maximum_flow()
print(f'''maximum flow is {maximum_flow}''')
| 708 |
"""simple docstring"""
import string
import numpy
def greatest_common_divisor(a: int , b: int ) -> int:
    return b if a == 0 else greatest_common_divisor(b % a , a )
class __lowerCAmelCase :
lowercase = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
    lowercase = numpy.vectorize(lambda x : x % 36 )
lowercase = numpy.vectorize(__SCREAMING_SNAKE_CASE )
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.modulus(__UpperCAmelCase ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
__UpperCamelCase = encrypt_key.shape[0]
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.key_string.index(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
return self.key_string[round(__UpperCAmelCase )]
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
__UpperCamelCase = det % len(self.key_string )
__UpperCamelCase = len(self.key_string )
if greatest_common_divisor(__UpperCAmelCase , len(self.key_string ) ) != 1:
__UpperCamelCase = (
F'determinant modular {req_l} of encryption key({det}) '
F'is not co prime w.r.t {req_l}.\nTry another key.'
)
raise ValueError(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = [char for char in text.upper() if char in self.key_string]
__UpperCamelCase = chars[-1]
while len(__UpperCAmelCase ) % self.break_key != 0:
chars.append(__UpperCAmelCase )
return "".join(__UpperCAmelCase )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.process_text(text.upper() )
__UpperCamelCase = ''
for i in range(0 , len(__UpperCAmelCase ) - self.break_key + 1 , self.break_key ):
__UpperCamelCase = text[i : i + self.break_key]
__UpperCamelCase = [self.replace_letters(__UpperCAmelCase ) for char in batch]
__UpperCamelCase = numpy.array([vec] ).T
__UpperCamelCase = self.modulus(self.encrypt_key.dot(__UpperCAmelCase ) ).T.tolist()[
0
]
__UpperCamelCase = ''.join(
self.replace_digits(__UpperCAmelCase ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def UpperCAmelCase ( self ):
'''simple docstring'''
__UpperCamelCase = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
__UpperCamelCase = det % len(self.key_string )
__UpperCamelCase = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
__UpperCamelCase = i
break
__UpperCamelCase = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(__UpperCAmelCase ) )
def UpperCAmelCase ( self , __UpperCAmelCase ):
'''simple docstring'''
__UpperCamelCase = self.make_decrypt_key()
__UpperCamelCase = self.process_text(text.upper() )
__UpperCamelCase = ''
for i in range(0 , len(__UpperCAmelCase ) - self.break_key + 1 , self.break_key ):
__UpperCamelCase = text[i : i + self.break_key]
__UpperCamelCase = [self.replace_letters(__UpperCAmelCase ) for char in batch]
__UpperCamelCase = numpy.array([vec] ).T
__UpperCamelCase = self.modulus(decrypt_key.dot(__UpperCAmelCase ) ).T.tolist()[0]
__UpperCamelCase = ''.join(
self.replace_digits(__UpperCAmelCase ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def main() -> None:
__UpperCamelCase = int(input('Enter the order of the encryption key: ' ) )
__UpperCamelCase = []
print('Enter each row of the encryption key with space separated integers' )
for _ in range(snake_case ):
__UpperCamelCase = [int(snake_case ) for x in input().split()]
hill_matrix.append(snake_case )
__UpperCamelCase = HillCipher(numpy.array(snake_case ) )
print('Would you like to encrypt or decrypt some text? (1 or 2)' )
__UpperCamelCase = input('\n1. Encrypt\n2. Decrypt\n' )
if option == "1":
__UpperCamelCase = input('What text would you like to encrypt?: ' )
print('Your encrypted text is:' )
print(hc.encrypt(snake_case ) )
elif option == "2":
__UpperCamelCase = input('What text would you like to decrypt?: ' )
print('Your decrypted text is:' )
print(hc.decrypt(snake_case ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 293 | 0 |
from math import factorial
def solution(num: int = 100 ) -> int:
    '''Return the sum of the digits of num! (Project Euler problem 20).'''
    return sum(int(x ) for x in str(factorial(num ) ) )
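# Example: solution(10) == 27, because 10! == 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 == 27.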
if __name__ == "__main__":
    print(solution(int(input('''Enter the Number: ''').strip())))
| 271 |
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
SCREAMING_SNAKE_CASE__ = {
'gwf-440k': {
'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 6_5_5_3_6,
},
'jmann-small-190k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 6_5_5_3_6,
},
'jmann-large-580k': {
'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
'sample_rate': 4_8_0_0_0,
'sample_size': 1_3_1_0_7_2,
},
'maestro-uncond-150k': {
'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
'unlocked-uncond-250k': {
'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
'honk-140k': {
'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
'sample_rate': 1_6_0_0_0,
'sample_size': 6_5_5_3_6,
},
}
def alpha_sigma_to_t(alpha , sigma ):
    return torch.atan2(sigma , alpha ) / math.pi * 2
def get_crash_schedule(t ):
    sigma = torch.sin(t * math.pi / 2 ) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha , sigma )
class Object:
pass
class DiffusionUncond(nn.Module ):
def __init__( self , _SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
super().__init__()
UpperCamelCase = DiffusionAttnUnetaD(_SCREAMING_SNAKE_CASE , n_attn_layers=4 )
UpperCamelCase = deepcopy(self.diffusion )
UpperCamelCase = torch.quasirandom.SobolEngine(1 , scramble=_SCREAMING_SNAKE_CASE )
def download(model_name ):
    url = MODELS_MAP[model_name]["""url"""]
    os.system(F"wget {url} ./" )
    return F"./{model_name}.ckpt"
SCREAMING_SNAKE_CASE__ = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
}
SCREAMING_SNAKE_CASE__ = {
'8': 'resnets.0',
'9': 'attentions.0',
'10': 'resnets.1',
'11': 'attentions.1',
'12': 'resnets.2',
'13': 'attentions.2',
}
SCREAMING_SNAKE_CASE__ = {
'1': 'resnets.0',
'2': 'attentions.0',
'3': 'resnets.1',
'4': 'attentions.1',
'5': 'resnets.2',
'6': 'attentions.2',
'8': 'resnets.3',
'9': 'attentions.3',
'10': 'resnets.4',
'11': 'attentions.4',
'12': 'resnets.5',
'13': 'attentions.5',
}
SCREAMING_SNAKE_CASE__ = {
'0': 'resnets.0',
'1': 'resnets.1',
'2': 'resnets.2',
'4': 'resnets.0',
'5': 'resnets.1',
'6': 'resnets.2',
}
SCREAMING_SNAKE_CASE__ = {
'skip': 'conv_skip',
'main.0': 'conv_1',
'main.1': 'group_norm_1',
'main.3': 'conv_2',
'main.4': 'group_norm_2',
}
SCREAMING_SNAKE_CASE__ = {
'norm': 'group_norm',
'qkv_proj': ['query', 'key', 'value'],
'out_proj': ['proj_attn'],
}
def convert_resconv_naming(name ):
    if name.startswith("""skip""" ):
        return name.replace("""skip""" , RES_CONV_MAP["""skip"""] )
    # name has to be of format main.{digit}
    if not name.startswith("""main.""" ):
        raise ValueError(F"ResConvBlock error with {name}" )
    return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def convert_attn_naming(name ):
    for key, value in ATTN_MAP.items():
        if name.startswith(key ) and not isinstance(value , list ):
            return name.replace(key , value )
        elif name.startswith(key ):
            return [name.replace(key , v ) for v in value]
    raise ValueError(F"Attn error with {name}" )
def lowercase__ ( __UpperCamelCase , __UpperCamelCase=13 )-> int:
UpperCamelCase = input_string
if string.split(""".""" )[0] == "timestep_embed":
return string.replace("""timestep_embed""" , """time_proj""" )
UpperCamelCase = 0
if string.startswith("""net.3.""" ):
depth += 1
UpperCamelCase = string[6:]
elif string.startswith("""net.""" ):
UpperCamelCase = string[4:]
while string.startswith("""main.7.""" ):
depth += 1
UpperCamelCase = string[7:]
if string.startswith("""main.""" ):
UpperCamelCase = string[5:]
# mid block
if string[:2].isdigit():
UpperCamelCase = string[:2]
UpperCamelCase = string[2:]
else:
UpperCamelCase = string[0]
UpperCamelCase = string[1:]
if depth == max_depth:
UpperCamelCase = MID_NUM_TO_LAYER[layer_num]
UpperCamelCase = """mid_block"""
elif depth > 0 and int(__UpperCamelCase ) < 7:
UpperCamelCase = DOWN_NUM_TO_LAYER[layer_num]
UpperCamelCase = F"down_blocks.{depth}"
elif depth > 0 and int(__UpperCamelCase ) > 7:
UpperCamelCase = UP_NUM_TO_LAYER[layer_num]
UpperCamelCase = F"up_blocks.{max_depth - depth - 1}"
elif depth == 0:
UpperCamelCase = DEPTH_0_TO_LAYER[layer_num]
UpperCamelCase = F"up_blocks.{max_depth - 1}" if int(__UpperCamelCase ) > 3 else """down_blocks.0"""
if not string_left.startswith(""".""" ):
raise ValueError(F"Naming error with {input_string} and string_left: {string_left}." )
UpperCamelCase = string_left[1:]
if "resnets" in new_layer:
UpperCamelCase = convert_resconv_naming(__UpperCamelCase )
elif "attentions" in new_layer:
UpperCamelCase = convert_attn_naming(__UpperCamelCase )
UpperCamelCase = new_string_left
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCamelCase = prefix + """.""" + new_layer + """.""" + string_left
else:
UpperCamelCase = [prefix + """.""" + new_layer + """.""" + s for s in string_left]
return new_string
def lowercase__ ( __UpperCamelCase )-> Tuple:
UpperCamelCase = {}
for k, v in state_dict.items():
if k.endswith("""kernel""" ):
# up- and downsample layers, don't have trainable weights
continue
UpperCamelCase = rename(__UpperCamelCase )
# check if we need to transform from Conv => Linear for attention
if isinstance(__UpperCamelCase , __UpperCamelCase ):
UpperCamelCase = transform_conv_attns(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
else:
UpperCamelCase = v
return new_state_dict
def lowercase__ ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )-> Optional[int]:
if len(__UpperCamelCase ) == 1:
if len(v.shape ) == 3:
# weight
UpperCamelCase = v[:, :, 0]
else:
# bias
UpperCamelCase = v
else:
# qkv matrices
UpperCamelCase = v.shape[0]
UpperCamelCase = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
UpperCamelCase = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
UpperCamelCase = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
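# Note: the original checkpoint stores the attention q, k and v projections stacked along
# dim 0, so the helper above splits that combined weight into three equal chunks.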
def main(__UpperCamelCase )-> Union[str, Any]:
UpperCamelCase = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" )
UpperCamelCase = args.model_path.split("""/""" )[-1].split(""".""" )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
UpperCamelCase = download(__UpperCamelCase )
UpperCamelCase = MODELS_MAP[model_name]["""sample_rate"""]
UpperCamelCase = MODELS_MAP[model_name]["""sample_size"""]
UpperCamelCase = Object()
UpperCamelCase = sample_size
UpperCamelCase = sample_rate
UpperCamelCase = 0
UpperCamelCase = UNetaDModel(sample_size=__UpperCamelCase , sample_rate=__UpperCamelCase )
UpperCamelCase = diffusers_model.state_dict()
UpperCamelCase = DiffusionUncond(__UpperCamelCase )
orig_model.load_state_dict(torch.load(args.model_path , map_location=__UpperCamelCase )["""state_dict"""] )
UpperCamelCase = orig_model.diffusion_ema.eval()
UpperCamelCase = orig_model.state_dict()
UpperCamelCase = rename_orig_weights(__UpperCamelCase )
UpperCamelCase = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
UpperCamelCase = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(__UpperCamelCase ) == 0, F"Problem with {renamed_minus_diffusers}"
assert all(k.endswith("""kernel""" ) for k in list(__UpperCamelCase ) ), F"Problem with {diffusers_minus_renamed}"
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
if key == "time_proj.weight":
UpperCamelCase = value.squeeze()
UpperCamelCase = value
diffusers_model.load_state_dict(__UpperCamelCase )
UpperCamelCase = 100
UpperCamelCase = 33
UpperCamelCase = IPNDMScheduler(num_train_timesteps=__UpperCamelCase )
UpperCamelCase = torch.manual_seed(__UpperCamelCase )
UpperCamelCase = torch.randn([1, 2, config.sample_size] , generator=__UpperCamelCase ).to(__UpperCamelCase )
UpperCamelCase = torch.linspace(1 , 0 , steps + 1 , device=__UpperCamelCase )[:-1]
UpperCamelCase = get_crash_schedule(__UpperCamelCase )
UpperCamelCase = DanceDiffusionPipeline(unet=__UpperCamelCase , scheduler=__UpperCamelCase )
UpperCamelCase = torch.manual_seed(33 )
UpperCamelCase = pipe(num_inference_steps=__UpperCamelCase , generator=__UpperCamelCase ).audios
UpperCamelCase = sampling.iplms_sample(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , {} )
UpperCamelCase = generated.clamp(-1 , 1 )
UpperCamelCase = (generated - audio).abs().sum()
UpperCamelCase = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print("""Diff sum""" , __UpperCamelCase )
print("""Diff max""" , __UpperCamelCase )
assert diff_max < 1E-3, F"Diff max: {diff_max} is too much :-/"
print(F"Conversion for {model_name} successful!" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
SCREAMING_SNAKE_CASE__ = parser.parse_args()
main(args)
| 301 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class __lowerCamelCase ( PipelineTool ):
"""simple docstring"""
a_: List[Any] = 'Salesforce/blip-image-captioning-base'
a_: Dict = (
'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
'image to caption, and returns a text that contains the description in English.'
)
a_: str = 'image_captioner'
a_: Dict = AutoModelForVisionaSeq
a_: Optional[Any] = ['image']
a_: List[str] = ['text']
    def __init__( self , *args , **kwargs ):
        requires_backends(self , ["""vision"""] )
        super().__init__(*args , **kwargs )
    def encode( self , image: "Image" ):
        return self.pre_processor(images=image , return_tensors="""pt""" )
    def forward( self , inputs ):
        return self.model.generate(**inputs )
    def decode( self , outputs ):
        return self.pre_processor.batch_decode(outputs , skip_special_tokens=True )[0].strip()
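# Rough usage sketch (assuming the surrounding agents/tools setup): calling the tool with a
# PIL image runs encode -> forward -> decode on the PipelineTool base class and returns the
# generated English caption as a string.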
| 718 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaImgaImgPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCamelCase ( PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
a_: Any = KandinskyVaaImgaImgPipeline
a_: Optional[int] = ["""image_embeds""", """negative_image_embeds""", """image"""]
a_: int = [
"""image_embeds""",
"""negative_image_embeds""",
"""image""",
]
a_: Dict = [
"""generator""",
"""height""",
"""width""",
"""strength""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a_: Dict = False
@property
def lowerCAmelCase__ ( self : Any ):
return 32
@property
def lowerCAmelCase__ ( self : str ):
return 32
@property
def lowerCAmelCase__ ( self : Dict ):
return self.time_input_dim
@property
def lowerCAmelCase__ ( self : Union[str, Any] ):
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self : Any ):
return 100
@property
def lowerCAmelCase__ ( self : Optional[Any] ):
torch.manual_seed(0 )
_lowerCAmelCase ={
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
_lowerCAmelCase =UNetaDConditionModel(**lowerCamelCase_ )
return model
@property
def lowerCAmelCase__ ( self : Dict ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowerCAmelCase__ ( self : str ):
torch.manual_seed(0 )
_lowerCAmelCase =VQModel(**self.dummy_movq_kwargs )
return model
def lowerCAmelCase__ ( self : Optional[int] ):
_lowerCAmelCase =self.dummy_unet
_lowerCAmelCase =self.dummy_movq
_lowerCAmelCase ={
"""num_train_timesteps""": 1000,
"""beta_schedule""": """linear""",
"""beta_start""": 0.0_0085,
"""beta_end""": 0.012,
"""clip_sample""": False,
"""set_alpha_to_one""": False,
"""steps_offset""": 0,
"""prediction_type""": """epsilon""",
"""thresholding""": False,
}
_lowerCAmelCase =DDIMScheduler(**lowerCamelCase_ )
_lowerCAmelCase ={
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def lowerCAmelCase__ ( self : int , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Tuple=0 ):
_lowerCAmelCase =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
_lowerCAmelCase =floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowerCamelCase_ )
# create init_image
_lowerCAmelCase =floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
_lowerCAmelCase =image.cpu().permute(0 , 2 , 3 , 1 )[0]
_lowerCAmelCase =Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert("""RGB""" ).resize((256, 256) )
if str(lowerCamelCase_ ).startswith("""mps""" ):
_lowerCAmelCase =torch.manual_seed(lowerCamelCase_ )
else:
_lowerCAmelCase =torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
_lowerCAmelCase ={
"""image""": init_image,
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""num_inference_steps""": 10,
"""guidance_scale""": 7.0,
"""strength""": 0.2,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase__ ( self : Union[str, Any] ):
_lowerCAmelCase ="""cpu"""
_lowerCAmelCase =self.get_dummy_components()
_lowerCAmelCase =self.pipeline_class(**lowerCamelCase_ )
_lowerCAmelCase =pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
_lowerCAmelCase =pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
_lowerCAmelCase =output.images
_lowerCAmelCase =pipe(
**self.get_dummy_inputs(lowerCamelCase_ ) , return_dict=lowerCamelCase_ , )[0]
_lowerCAmelCase =image[0, -3:, -3:, -1]
_lowerCAmelCase =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_lowerCAmelCase =np.array(
[0.619_9778, 0.6398_4406, 0.4614_5785, 0.6294_4984, 0.562_2215, 0.4730_6132, 0.4744_1456, 0.460_7606, 0.4871_9263] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self : List[str] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self : int ):
_lowerCAmelCase =load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_img2img_frog.npy""" )
_lowerCAmelCase =load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" )
_lowerCAmelCase ="""A red cartoon frog, 4k"""
_lowerCAmelCase =KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase_ )
_lowerCAmelCase =KandinskyVaaImgaImgPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
_lowerCAmelCase =pipeline.to(lowerCamelCase_ )
pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
_lowerCAmelCase =torch.Generator(device="""cpu""" ).manual_seed(0 )
_lowerCAmelCase , _lowerCAmelCase =pipe_prior(
lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
_lowerCAmelCase =pipeline(
image=lowerCamelCase_ , image_embeds=lowerCamelCase_ , negative_image_embeds=lowerCamelCase_ , generator=lowerCamelCase_ , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , )
_lowerCAmelCase =output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCamelCase_ , lowerCamelCase_ )
| 149 | 0 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError("To use the rich extension, install rich with `pip install rich`")
| 632 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase : Tuple = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
__lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 297 | 0 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
_lowercase = logging.getLogger(__name__)
@dataclass
class _UpperCAmelCase :
UpperCamelCase__ = field(
default=128 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCamelCase__ = field(
default=A__ , metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} )
UpperCamelCase__ = field(
default=A__ , metadata={
'''help''': (
'''Whether to pad all samples to `max_seq_length`. '''
'''If False, will pad the samples dynamically when batching to the maximum length in the batch.'''
)
} , )
UpperCamelCase__ = field(
default=A__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase__ = field(
default=A__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
UpperCamelCase__ = field(
default=A__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of prediction examples to this '''
'''value if set.'''
)
} , )
@dataclass
class _UpperCAmelCase :
UpperCamelCase__ = field(
default=A__ , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCamelCase__ = field(
default=A__ , metadata={'''help''': '''Evaluation language. Also train language if `train_language` is set to None.'''} )
UpperCamelCase__ = field(
default=A__ , metadata={'''help''': '''Train language if it is different from the evaluation language.'''} )
UpperCamelCase__ = field(
default=A__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCamelCase__ = field(
default=A__ , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCamelCase__ = field(
default=A__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCamelCase__ = field(
default=A__ , metadata={'''help''': '''arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()'''} , )
UpperCamelCase__ = field(
default=A__ , metadata={'''help''': '''Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'''} , )
UpperCamelCase__ = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
UpperCamelCase__ = field(
default=A__ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
UpperCamelCase__ = field(
default=A__ , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def lowerCAmelCase__ ( )-> Optional[int]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
A__ , A__ , A__ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_xnli''' , UpperCamelCase_ )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A__ = training_args.get_process_log_level()
logger.setLevel(UpperCamelCase_ )
datasets.utils.logging.set_verbosity(UpperCamelCase_ )
transformers.utils.logging.set_verbosity(UpperCamelCase_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
A__ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A__ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
'''Use --overwrite_output_dir to overcome.''' )
elif last_checkpoint is not None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''' )
# Set seed before initializing model.
set_seed(training_args.seed )
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
# Downloading and loading xnli dataset from the hub.
if training_args.do_train:
if model_args.train_language is None:
A__ = load_dataset(
'''xnli''' , model_args.language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
A__ = load_dataset(
'''xnli''' , model_args.train_language , split='''train''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
A__ = train_dataset.features['''label'''].names
if training_args.do_eval:
A__ = load_dataset(
'''xnli''' , model_args.language , split='''validation''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
A__ = eval_dataset.features['''label'''].names
if training_args.do_predict:
A__ = load_dataset(
'''xnli''' , model_args.language , split='''test''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
A__ = predict_dataset.features['''label'''].names
# Labels
A__ = len(UpperCamelCase_ )
# Load pretrained model and tokenizer
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
A__ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCamelCase_ , idalabel={str(UpperCamelCase_ ): label for i, label in enumerate(UpperCamelCase_ )} , labelaid={label: i for i, label in enumerate(UpperCamelCase_ )} , finetuning_task='''xnli''' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , do_lower_case=model_args.do_lower_case , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A__ = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCamelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# Preprocessing the datasets
# Padding strategy
if data_args.pad_to_max_length:
A__ = '''max_length'''
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
A__ = False
def preprocess_function(UpperCamelCase_ : str ):
# Tokenize the texts
return tokenizer(
examples['''premise'''] , examples['''hypothesis'''] , padding=UpperCamelCase_ , max_length=data_args.max_seq_length , truncation=UpperCamelCase_ , )
if training_args.do_train:
if data_args.max_train_samples is not None:
A__ = min(len(UpperCamelCase_ ) , data_args.max_train_samples )
A__ = train_dataset.select(range(UpperCamelCase_ ) )
with training_args.main_process_first(desc='''train dataset map pre-processing''' ):
A__ = train_dataset.map(
UpperCamelCase_ , batched=UpperCamelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on train dataset''' , )
# Log a few random samples from the training set:
for index in random.sample(range(len(UpperCamelCase_ ) ) , 3 ):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}." )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
A__ = min(len(UpperCamelCase_ ) , data_args.max_eval_samples )
A__ = eval_dataset.select(range(UpperCamelCase_ ) )
with training_args.main_process_first(desc='''validation dataset map pre-processing''' ):
A__ = eval_dataset.map(
UpperCamelCase_ , batched=UpperCamelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on validation dataset''' , )
if training_args.do_predict:
if data_args.max_predict_samples is not None:
A__ = min(len(UpperCamelCase_ ) , data_args.max_predict_samples )
A__ = predict_dataset.select(range(UpperCamelCase_ ) )
with training_args.main_process_first(desc='''prediction dataset map pre-processing''' ):
A__ = predict_dataset.map(
UpperCamelCase_ , batched=UpperCamelCase_ , load_from_cache_file=not data_args.overwrite_cache , desc='''Running tokenizer on prediction dataset''' , )
# Get the metric function
A__ = evaluate.load('''xnli''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(UpperCamelCase_ : EvalPrediction ):
A__ = p.predictions[0] if isinstance(p.predictions , UpperCamelCase_ ) else p.predictions
A__ = np.argmax(UpperCamelCase_ , axis=1 )
return metric.compute(predictions=UpperCamelCase_ , references=p.label_ids )
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
A__ = default_data_collator
elif training_args.fpaa:
A__ = DataCollatorWithPadding(UpperCamelCase_ , pad_to_multiple_of=8 )
else:
A__ = None
# Initialize our Trainer
A__ = Trainer(
model=UpperCamelCase_ , args=UpperCamelCase_ , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=UpperCamelCase_ , tokenizer=UpperCamelCase_ , data_collator=UpperCamelCase_ , )
# Training
if training_args.do_train:
A__ = None
if training_args.resume_from_checkpoint is not None:
A__ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A__ = last_checkpoint
A__ = trainer.train(resume_from_checkpoint=UpperCamelCase_ )
A__ = train_result.metrics
A__ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCamelCase_ )
)
A__ = min(UpperCamelCase_ , len(UpperCamelCase_ ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics('''train''' , UpperCamelCase_ )
trainer.save_metrics('''train''' , UpperCamelCase_ )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
A__ = trainer.evaluate(eval_dataset=UpperCamelCase_ )
A__ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCamelCase_ )
A__ = min(UpperCamelCase_ , len(UpperCamelCase_ ) )
trainer.log_metrics('''eval''' , UpperCamelCase_ )
trainer.save_metrics('''eval''' , UpperCamelCase_ )
# Prediction
if training_args.do_predict:
logger.info('''*** Predict ***''' )
A__ , A__ , A__ = trainer.predict(UpperCamelCase_ , metric_key_prefix='''predict''' )
A__ = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(UpperCamelCase_ )
)
A__ = min(UpperCamelCase_ , len(UpperCamelCase_ ) )
trainer.log_metrics('''predict''' , UpperCamelCase_ )
trainer.save_metrics('''predict''' , UpperCamelCase_ )
A__ = np.argmax(UpperCamelCase_ , axis=1 )
A__ = os.path.join(training_args.output_dir , '''predictions.txt''' )
if trainer.is_world_process_zero():
with open(UpperCamelCase_ , '''w''' ) as writer:
writer.write('''index\tprediction\n''' )
for index, item in enumerate(UpperCamelCase_ ):
A__ = label_list[item]
writer.write(f"{index}\t{item}\n" )
if __name__ == "__main__":
main()
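# Illustrative sketch (added; not part of the original sample): the compute_metrics helper
# above reduces per-class logits to label ids with an argmax before handing them to the
# XNLI metric. The toy values below are invented purely to show that reduction.
import numpy as np

toy_logits = np.array([[0.1, 0.7, 0.2],   # -> predicted label 1
                       [0.8, 0.1, 0.1]])  # -> predicted label 0
print(np.argmax(toy_logits, axis=1).tolist())  # [1, 0]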
| 700 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class _UpperCAmelCase ( A__ ):
UpperCamelCase__ = '''bert'''
def __init__( self , a__=3_0_5_2_2 , a__=7_6_8 , a__=1_2 , a__=1_2 , a__=3_0_7_2 , a__="gelu" , a__=0.1 , a__=0.1 , a__=5_1_2 , a__=2 , a__=0.0_2 , a__=1e-12 , a__=0 , a__="absolute" , a__=True , a__=None , **a__ , ):
super().__init__(pad_token_id=a__ , **a__)
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = initializer_range
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = use_cache
A__ = classifier_dropout
class _UpperCAmelCase ( A__ ):
@property
def snake_case_ ( self):
if self.task == "multiple-choice":
A__ = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
A__ = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
('''token_type_ids''', dynamic_axis),
])
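# Illustrative sketch (added; not part of the original sample): the OnnxConfig property
# above only assembles an OrderedDict of dynamic-axis maps keyed by ONNX input name. The
# plain function below mirrors that logic so the two task branches can be compared side by
# side; "multiple-choice" is the only task name taken from the code above.
from collections import OrderedDict

def dynamic_axes_for(task):
    if task == "multiple-choice":
        axes = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        axes = {0: "batch", 1: "sequence"}
    return OrderedDict(
        [("input_ids", axes), ("attention_mask", axes), ("token_type_ids", axes)]
    )

print(dynamic_axes_for("sequence-classification")["input_ids"])  # {0: 'batch', 1: 'sequence'}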
| 526 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
__snake_case =None
__snake_case =logging.get_logger(__name__)
__snake_case ={"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
__snake_case ={
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
__snake_case ={
"""facebook/nllb-large-en-ro""": 1_024,
"""facebook/nllb-200-distilled-600M""": 1_024,
}
# fmt: off
__snake_case =["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class UpperCAmelCase_ ( __lowercase ):
lowerCamelCase : str = VOCAB_FILES_NAMES
lowerCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Union[str, Any] = ['''input_ids''', '''attention_mask''']
lowerCamelCase : Tuple = NllbTokenizer
lowerCamelCase : List[int] = []
lowerCamelCase : List[int] = []
def __init__( self : Tuple , UpperCAmelCase__ : Tuple=None , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : int="<s>" , UpperCAmelCase__ : List[Any]="</s>" , UpperCAmelCase__ : List[Any]="</s>" , UpperCAmelCase__ : Dict="<s>" , UpperCAmelCase__ : Optional[Any]="<unk>" , UpperCAmelCase__ : Any="<pad>" , UpperCAmelCase__ : Tuple="<mask>" , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Union[str, Any]=None , UpperCAmelCase__ : List[Any]=None , UpperCAmelCase__ : Union[str, Any]=False , **UpperCAmelCase__ : Any , ) -> List[str]:
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token
lowerCAmelCase = legacy_behaviour
super().__init__(
vocab_file=UpperCAmelCase__ , tokenizer_file=UpperCAmelCase__ , bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , src_lang=UpperCAmelCase__ , tgt_lang=UpperCAmelCase__ , additional_special_tokens=UpperCAmelCase__ , legacy_behaviour=UpperCAmelCase__ , **UpperCAmelCase__ , )
lowerCAmelCase = vocab_file
lowerCAmelCase = False if not self.vocab_file else True
lowerCAmelCase = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
lowerCAmelCase = {
lang_code: self.convert_tokens_to_ids(UpperCAmelCase__ ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCAmelCase = src_lang if src_lang is not None else 'eng_Latn'
lowerCAmelCase = self.convert_tokens_to_ids(self._src_lang )
lowerCAmelCase = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def __UpperCAmelCase ( self : List[str] ) -> str:
return self._src_lang
@src_lang.setter
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : str ) -> None:
lowerCAmelCase = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] , UpperCAmelCase__ : Optional[str] , **UpperCAmelCase__ : str ) -> str:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowerCAmelCase = src_lang
lowerCAmelCase = self(UpperCAmelCase__ , add_special_tokens=UpperCAmelCase__ , return_tensors=UpperCAmelCase__ , **UpperCAmelCase__ )
lowerCAmelCase = self.convert_tokens_to_ids(UpperCAmelCase__ )
lowerCAmelCase = tgt_lang_id
return inputs
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str = "eng_Latn" , UpperCAmelCase__ : Optional[List[str]] = None , UpperCAmelCase__ : str = "fra_Latn" , **UpperCAmelCase__ : Optional[Any] , ) -> BatchEncoding:
lowerCAmelCase = src_lang
lowerCAmelCase = tgt_lang
return super().prepare_seqaseq_batch(UpperCAmelCase__ , UpperCAmelCase__ , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : List[Any] ) -> str:
return self.set_src_lang_special_tokens(self.src_lang )
def __UpperCAmelCase ( self : Tuple ) -> Optional[Any]:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : List[str] ) -> None:
lowerCAmelCase = self.convert_tokens_to_ids(UpperCAmelCase__ )
if self.legacy_behaviour:
lowerCAmelCase = []
lowerCAmelCase = [self.eos_token_id, self.cur_lang_code]
else:
lowerCAmelCase = [self.cur_lang_code]
lowerCAmelCase = [self.eos_token_id]
lowerCAmelCase = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCAmelCase = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCAmelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : str ) -> None:
lowerCAmelCase = self.convert_tokens_to_ids(UpperCAmelCase__ )
if self.legacy_behaviour:
lowerCAmelCase = []
lowerCAmelCase = [self.eos_token_id, self.cur_lang_code]
else:
lowerCAmelCase = [self.cur_lang_code]
lowerCAmelCase = [self.eos_token_id]
lowerCAmelCase = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCAmelCase = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCAmelCase = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' )
return
lowerCAmelCase = os.path.join(
UpperCAmelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ):
copyfile(self.vocab_file , UpperCAmelCase__ )
return (out_vocab_file,)
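# Illustrative sketch (added; not part of the original sample): set_src_lang_special_tokens
# above toggles between the legacy layout (tokens + [eos, lang_code]) and the newer layout
# ([lang_code] + tokens + [eos]). The integer ids below are made up to show both sequences.
def wrap_with_special_tokens(token_ids, lang_code_id, eos_id, legacy_behaviour):
    if legacy_behaviour:
        prefix, suffix = [], [eos_id, lang_code_id]
    else:
        prefix, suffix = [lang_code_id], [eos_id]
    return prefix + list(token_ids) + suffix

print(wrap_with_special_tokens([10, 11], lang_code_id=7, eos_id=2, legacy_behaviour=False))
# [7, 10, 11, 2]
print(wrap_with_special_tokens([10, 11], lang_code_id=7, eos_id=2, legacy_behaviour=True))
# [10, 11, 2, 7]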
| 133 |
'''simple docstring'''
from __future__ import annotations
from scipy.special import comb # type: ignore
class UpperCAmelCase_ :
def __init__( self : Union[str, Any] , UpperCAmelCase__ : list[tuple[float, float]] ) -> Optional[int]:
lowerCAmelCase = list_of_points
# Degree determines the flexibility of the curve.
# Degree = 1 will produce a straight line.
lowerCAmelCase = len(UpperCAmelCase__ ) - 1
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : float ) -> list[float]:
assert 0 <= t <= 1, "Time t must be between 0 and 1."
lowerCAmelCase = []
for i in range(len(self.list_of_points ) ):
# basis function for each i
output_values.append(
comb(self.degree , UpperCAmelCase__ ) * ((1 - t) ** (self.degree - i)) * (t**i) )
# the basis must sum up to 1 for it to produce a valid Bezier curve.
assert round(sum(UpperCAmelCase__ ) , 5 ) == 1
return output_values
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : float ) -> tuple[float, float]:
assert 0 <= t <= 1, "Time t must be between 0 and 1."
lowerCAmelCase = self.basis_function(UpperCAmelCase__ )
lowerCAmelCase = 0.0
lowerCAmelCase = 0.0
for i in range(len(self.list_of_points ) ):
# For all points, sum up the product of i-th basis function and i-th point.
x += basis_function[i] * self.list_of_points[i][0]
y += basis_function[i] * self.list_of_points[i][1]
return (x, y)
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : float = 0.01 ) -> Union[str, Any]:
from matplotlib import pyplot as plt # type: ignore
lowerCAmelCase = [] # x coordinates of points to plot
lowerCAmelCase = [] # y coordinates of points to plot
lowerCAmelCase = 0.0
while t <= 1:
lowerCAmelCase = self.bezier_curve_function(UpperCAmelCase__ )
to_plot_x.append(value[0] )
to_plot_y.append(value[1] )
t += step_size
lowerCAmelCase = [i[0] for i in self.list_of_points]
lowerCAmelCase = [i[1] for i in self.list_of_points]
plt.plot(
UpperCAmelCase__ , UpperCAmelCase__ , color='blue' , label='Curve of Degree ' + str(self.degree ) , )
plt.scatter(UpperCAmelCase__ , UpperCAmelCase__ , color='red' , label='Control Points' )
plt.legend()
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1
BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2
BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
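# Illustrative check (added; not part of the original sample): the Bernstein basis used by
# basis_function above lets a point be evaluated without plotting. At t = 0.5 the quadratic
# curve through (0, 0), (5, 5), (5, 0) passes through (3.75, 2.5). scipy is assumed to be
# available, as in the class above.
from scipy.special import comb

pts = [(0, 0), (5, 5), (5, 0)]
deg = len(pts) - 1
t = 0.5
basis = [float(comb(deg, i)) * (1 - t) ** (deg - i) * t**i for i in range(len(pts))]
x = sum(b * p[0] for b, p in zip(basis, pts))
y = sum(b * p[1] for b, p in zip(basis, pts))
print((x, y))  # (3.75, 2.5)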
| 133 | 1 |
'''simple docstring'''
import os
import sys
import tempfile
import torch
from .state import AcceleratorState
from .utils import PrecisionType, PrepareForLaunch, is_mps_available, patch_environment
def _snake_case ( A , A=() , A=None , A="no" , A="29500" ) -> int:
lowerCAmelCase__ = False
lowerCAmelCase__ = False
if any(key.startswith('''KAGGLE''' ) for key in os.environ.keys() ):
lowerCAmelCase__ = True
elif "IPython" in sys.modules:
lowerCAmelCase__ = '''google.colab''' in str(sys.modules['''IPython'''].get_ipython() )
try:
lowerCAmelCase__ = PrecisionType(mixed_precision.lower() )
except ValueError:
raise ValueError(
F"""Unknown mixed_precision mode: {args.mixed_precision.lower()}. Choose between {PrecisionType.list()}.""" )
if (in_colab or in_kaggle) and (os.environ.get('''TPU_NAME''' , A ) is not None):
# TPU launch
import torch_xla.distributed.xla_multiprocessing as xmp
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To train on TPU in Colab or Kaggle Kernel, the `Accelerator` should only be initialized inside '''
'''your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if num_processes is None:
lowerCAmelCase__ = 8
lowerCAmelCase__ = PrepareForLaunch(A , distributed_type='''TPU''' )
print(F"""Launching a training on {num_processes} TPU cores.""" )
xmp.spawn(A , args=A , nprocs=A , start_method='''fork''' )
elif in_colab:
# No need for a distributed launch otherwise as it's either CPU or one GPU.
if torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on one CPU.''' )
function(*A )
else:
if num_processes is None:
raise ValueError(
'''You have to specify the number of GPUs you would like to use, add `num_processes=...` to your call.''' )
if num_processes > 1:
# Multi-GPU launch
from torch.multiprocessing import start_processes
from torch.multiprocessing.spawn import ProcessRaisedException
if len(AcceleratorState._shared_state ) > 0:
raise ValueError(
'''To launch a multi-GPU training from your notebook, the `Accelerator` should only be initialized '''
'''inside your training function. Restart your notebook and make sure no cells initializes an '''
'''`Accelerator`.''' )
if torch.cuda.is_initialized():
raise ValueError(
'''To launch a multi-GPU training from your notebook, you need to avoid running any instruction '''
'''using `torch.cuda` in any cell. Restart your notebook and make sure no cells use any CUDA '''
'''function.''' )
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=A , master_addr='''127.0.01''' , master_port=A , mixed_precision=A ):
lowerCAmelCase__ = PrepareForLaunch(A , distributed_type='''MULTI_GPU''' )
print(F"""Launching training on {num_processes} GPUs.""" )
try:
start_processes(A , args=A , nprocs=A , start_method='''fork''' )
except ProcessRaisedException as e:
if "Cannot re-initialize CUDA in forked subprocess" in e.args[0]:
raise RuntimeError(
'''CUDA has been initialized before the `notebook_launcher` could create a forked subprocess. '''
'''This likely stems from an outside import causing issues once the `notebook_launcher()` is called. '''
'''Please review your imports and test them when running the `notebook_launcher()` to identify '''
'''which one is problematic.''' ) from e
else:
# No need for a distributed launch otherwise as it's either CPU, GPU or MPS.
if is_mps_available():
lowerCAmelCase__ = '''1'''
print('''Launching training on MPS.''' )
elif torch.cuda.is_available():
print('''Launching training on one GPU.''' )
else:
print('''Launching training on CPU.''' )
function(*A )
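# Illustrative usage sketch (added; not part of the original sample): upstream, a launcher
# like the function above is called from a notebook cell with the training function and its
# arguments. The call below is indicative only; in this copy the parameter names have been
# collapsed to `A`, so the keyword form shown here would not run against the signature as
# written.
#
#   def training_loop(mixed_precision="fp16"):
#       ...
#
#   launcher(training_loop, args=("fp16",), num_processes=2)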
def _snake_case ( A , A=() , A=2 ) -> Optional[Any]:
from torch.multiprocessing import start_processes
with tempfile.NamedTemporaryFile() as tmp_file:
# torch.distributed will expect a few environment variable to be here. We set the ones common to each
# process here (the other ones will be set be the launcher).
with patch_environment(
world_size=A , master_addr='''127.0.01''' , master_port='''29500''' , accelerate_mixed_precision='''no''' , accelerate_debug_rdv_file=tmp_file.name , accelerate_use_cpu='''yes''' , ):
lowerCAmelCase__ = PrepareForLaunch(A , debug=A )
            start_processes(A , args=A , nprocs=A , start_method='''fork''' )
| 98 |
'''simple docstring'''
from __future__ import annotations
class a__ :
'''simple docstring'''
def __init__( self , lowerCamelCase_ = 0 ) -> List[Any]:
lowerCAmelCase__ = key
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> list[str]:
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and isinstance(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_55
return [chr(ord(lowerCamelCase_ ) ^ key ) for ch in content]
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> list[str]:
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and isinstance(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = key or self.__key or 1
# make sure key is an appropriate size
key %= 2_55
return [chr(ord(lowerCamelCase_ ) ^ key ) for ch in content]
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = 0 ) -> str:
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and isinstance(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = key or self.__key or 1
# make sure key can be any size
while key > 2_55:
key -= 2_55
# This will be returned
lowerCAmelCase__ = ''''''
for ch in content:
ans += chr(ord(lowerCamelCase_ ) ^ key )
return ans
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = 0 ) -> str:
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and isinstance(lowerCamelCase_ , lowerCamelCase_ )
lowerCAmelCase__ = key or self.__key or 1
# make sure key can be any size
while key > 2_55:
key -= 2_55
# This will be returned
lowerCAmelCase__ = ''''''
for ch in content:
ans += chr(ord(lowerCamelCase_ ) ^ key )
return ans
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ = 0 ) -> bool:
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and isinstance(lowerCamelCase_ , lowerCamelCase_ )
try:
with open(lowerCamelCase_ ) as fin, open('''encrypt.out''' , '''w+''' ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.encrypt_string(lowerCamelCase_ , lowerCamelCase_ ) )
except OSError:
return False
return True
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> bool:
assert isinstance(lowerCamelCase_ , lowerCamelCase_ ) and isinstance(lowerCamelCase_ , lowerCamelCase_ )
try:
with open(lowerCamelCase_ ) as fin, open('''decrypt.out''' , '''w+''' ) as fout:
# actual encrypt-process
for line in fin:
fout.write(self.decrypt_string(lowerCamelCase_ , lowerCamelCase_ ) )
except OSError:
return False
return True
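# Illustrative sketch (added; not part of the original sample): every method above is a
# plain XOR against a small integer key, so the standalone helper below shows the round
# trip without depending on the mangled class or method names in this copy.
def xor_string(content: str, key: int) -> str:
    key %= 255
    return "".join(chr(ord(ch) ^ key) for ch in content)

scrambled = xor_string("hallo welt", 67)
print(xor_string(scrambled, 67))  # hallo welt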
# Tests
# crypt = XORCipher()
# key = 67
# # test encrypt
# print(crypt.encrypt("hallo welt",key))
# # test decrypt
# print(crypt.decrypt(crypt.encrypt("hallo welt",key), key))
# # test encrypt_string
# print(crypt.encrypt_string("hallo welt",key))
# # test decrypt_string
# print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key))
# if (crypt.encrypt_file("test.txt",key)):
# print("encrypt successful")
# else:
# print("encrypt unsuccessful")
# if (crypt.decrypt_file("encrypt.out",key)):
# print("decrypt successful")
# else:
# print("decrypt unsuccessful")
| 98 | 1 |
import json
import os
import shutil
import tempfile
import unittest
from multiprocessing import get_context
from pathlib import Path
import datasets
import numpy as np
from datasets import load_dataset
from parameterized import parameterized
from transformers import AutoProcessor
from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor
from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES
from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available
from ..wavaveca.test_feature_extraction_wavaveca import floats_list
if is_pyctcdecode_available():
from huggingface_hub import snapshot_download
from pyctcdecode import BeamSearchDecoderCTC
from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM
from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput
if is_torch_available():
from transformers import WavaVecaForCTC
@require_pyctcdecode
class _UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : str ) -> Tuple:
__lowerCAmelCase = '| <pad> <unk> <s> </s> a b c d e f g h i j k'.split()
__lowerCAmelCase = dict(zip(lowercase__ , range(len(lowercase__ ) ) ) )
__lowerCAmelCase = {
'unk_token': '<unk>',
'bos_token': '<s>',
'eos_token': '</s>',
}
__lowerCAmelCase = {
'feature_size': 1,
'padding_value': 0.0,
'sampling_rate': 1_6_0_0_0,
'return_attention_mask': False,
'do_normalize': True,
}
__lowerCAmelCase = tempfile.mkdtemp()
__lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__lowerCAmelCase = os.path.join(self.tmpdirname , lowercase__ )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowercase__ ) + '\n' )
with open(self.feature_extraction_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(lowercase__ ) + '\n' )
# load decoder from hub
__lowerCAmelCase = 'hf-internal-testing/ngram-beam-search-decoder'
def lowercase ( self : Optional[int] , **lowerCAmelCase_ : Any ) -> int:
__lowerCAmelCase = self.add_kwargs_tokens_map.copy()
kwargs.update(lowercase__ )
return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )
def lowercase ( self : Optional[int] , **lowerCAmelCase_ : str ) -> Optional[int]:
return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **lowercase__ )
def lowercase ( self : Any , **lowerCAmelCase_ : Optional[int] ) -> Optional[Any]:
return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **lowercase__ )
def lowercase ( self : Optional[Any] ) -> Any:
shutil.rmtree(self.tmpdirname )
def lowercase ( self : Union[str, Any] ) -> Tuple:
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_feature_extractor()
__lowerCAmelCase = self.get_decoder()
__lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=lowercase__ , feature_extractor=lowercase__ , decoder=lowercase__ )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname )
# tokenizer
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase__ )
# feature extractor
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , lowercase__ )
# decoder
self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels )
self.assertEqual(
processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , )
self.assertIsInstance(processor.decoder , lowercase__ )
def lowercase ( self : Dict ) -> int:
__lowerCAmelCase = WavaVecaProcessorWithLM(
tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
processor.save_pretrained(self.tmpdirname )
# make sure that error is thrown when decoder alphabet doesn't match
__lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained(
self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 )
# decoder
self.assertEqual(processor.language_model.alpha , 5.0 )
self.assertEqual(processor.language_model.beta , 3.0 )
self.assertEqual(processor.language_model.score_boundary , -7.0 )
self.assertEqual(processor.language_model.unk_score_offset , 3 )
def lowercase ( self : Tuple ) -> Optional[int]:
__lowerCAmelCase = self.get_tokenizer()
# add token to trigger raise
tokenizer.add_tokens(['xx'] )
with self.assertRaisesRegex(lowercase__ , 'include' ):
WavaVecaProcessorWithLM(
tokenizer=lowercase__ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() )
def lowercase ( self : List[str] ) -> str:
__lowerCAmelCase = self.get_feature_extractor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_decoder()
__lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=lowercase__ , feature_extractor=lowercase__ , decoder=lowercase__ )
__lowerCAmelCase = floats_list((3, 1_0_0_0) )
__lowerCAmelCase = feature_extractor(lowercase__ , return_tensors='np' )
__lowerCAmelCase = processor(lowercase__ , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase ( self : int ) -> Any:
__lowerCAmelCase = self.get_feature_extractor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_decoder()
__lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=lowercase__ , feature_extractor=lowercase__ , decoder=lowercase__ )
__lowerCAmelCase = 'This is a test string'
__lowerCAmelCase = processor(text=lowercase__ )
__lowerCAmelCase = tokenizer(lowercase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase ( self : Any , lowerCAmelCase_ : Union[str, Any]=(2, 1_0, 1_6) , lowerCAmelCase_ : Optional[Any]=7_7 ) -> Optional[int]:
np.random.seed(lowercase__ )
return np.random.rand(*lowercase__ )
def lowercase ( self : str ) -> Any:
__lowerCAmelCase = self.get_feature_extractor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_decoder()
__lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=lowercase__ , feature_extractor=lowercase__ , decoder=lowercase__ )
__lowerCAmelCase = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 )
__lowerCAmelCase = processor.decode(lowercase__ )
__lowerCAmelCase = decoder.decode_beams(lowercase__ )[0]
self.assertEqual(decoded_decoder[0] , decoded_processor.text )
self.assertEqual('</s> <s> </s>' , decoded_processor.text )
self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score )
self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score )
@parameterized.expand([[None], ['fork'], ['spawn']] )
def lowercase ( self : int , lowerCAmelCase_ : Optional[int] ) -> Dict:
__lowerCAmelCase = self.get_feature_extractor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_decoder()
__lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=lowercase__ , feature_extractor=lowercase__ , decoder=lowercase__ )
__lowerCAmelCase = self._get_dummy_logits()
# note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM.
# otherwise, the LM won't be available to the pool's sub-processes.
# manual logic used to allow parameterized test for both pool=None and pool=Pool(...)
if pool_context is None:
__lowerCAmelCase = processor.batch_decode(lowercase__ )
else:
with get_context(lowercase__ ).Pool() as pool:
__lowerCAmelCase = processor.batch_decode(lowercase__ , lowercase__ )
__lowerCAmelCase = list(lowercase__ )
with get_context('fork' ).Pool() as p:
__lowerCAmelCase = decoder.decode_beams_batch(lowercase__ , lowercase__ )
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = [], [], []
for beams in decoded_beams:
texts_decoder.append(beams[0][0] )
logit_scores_decoder.append(beams[0][-2] )
lm_scores_decoder.append(beams[0][-1] )
self.assertListEqual(lowercase__ , decoded_processor.text )
self.assertListEqual(['<s> <s> </s>', '<s> <s> <s>'] , decoded_processor.text )
self.assertListEqual(lowercase__ , decoded_processor.logit_score )
self.assertListEqual(lowercase__ , decoded_processor.lm_score )
def lowercase ( self : Union[str, Any] ) -> Dict:
__lowerCAmelCase = self.get_feature_extractor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_decoder()
__lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=lowercase__ , feature_extractor=lowercase__ , decoder=lowercase__ )
__lowerCAmelCase = self._get_dummy_logits()
__lowerCAmelCase = 1_5
__lowerCAmelCase = -2_0.0
__lowerCAmelCase = -4.0
__lowerCAmelCase = processor.batch_decode(
lowercase__ , beam_width=lowercase__ , beam_prune_logp=lowercase__ , token_min_logp=lowercase__ , )
__lowerCAmelCase = decoded_processor_out.text
__lowerCAmelCase = list(lowercase__ )
with get_context('fork' ).Pool() as pool:
__lowerCAmelCase = decoder.decode_beams_batch(
lowercase__ , lowercase__ , beam_width=lowercase__ , beam_prune_logp=lowercase__ , token_min_logp=lowercase__ , )
__lowerCAmelCase = [d[0][0] for d in decoded_decoder_out]
__lowerCAmelCase = [d[0][2] for d in decoded_decoder_out]
__lowerCAmelCase = [d[0][3] for d in decoded_decoder_out]
self.assertListEqual(lowercase__ , lowercase__ )
self.assertListEqual(['</s> <s> <s>', '<s> <s> <s>'] , lowercase__ )
self.assertTrue(np.array_equal(lowercase__ , decoded_processor_out.logit_score ) )
self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , lowercase__ , atol=1e-3 ) )
self.assertTrue(np.array_equal(lowercase__ , decoded_processor_out.lm_score ) )
self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , lowercase__ , atol=1e-3 ) )
def lowercase ( self : int ) -> List[str]:
__lowerCAmelCase = self.get_feature_extractor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_decoder()
__lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=lowercase__ , feature_extractor=lowercase__ , decoder=lowercase__ )
__lowerCAmelCase = self._get_dummy_logits()
__lowerCAmelCase = 2.0
__lowerCAmelCase = 5.0
__lowerCAmelCase = -2_0.0
__lowerCAmelCase = True
__lowerCAmelCase = processor.batch_decode(
lowercase__ , alpha=lowercase__ , beta=lowercase__ , unk_score_offset=lowercase__ , lm_score_boundary=lowercase__ , )
__lowerCAmelCase = decoded_processor_out.text
__lowerCAmelCase = list(lowercase__ )
decoder.reset_params(
alpha=lowercase__ , beta=lowercase__ , unk_score_offset=lowercase__ , lm_score_boundary=lowercase__ , )
with get_context('fork' ).Pool() as pool:
__lowerCAmelCase = decoder.decode_beams_batch(
lowercase__ , lowercase__ , )
__lowerCAmelCase = [d[0][0] for d in decoded_decoder_out]
self.assertListEqual(lowercase__ , lowercase__ )
self.assertListEqual(['<s> </s> <s> </s> </s>', '</s> </s> <s> </s> </s>'] , lowercase__ )
__lowerCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
self.assertEqual(lm_model.alpha , 2.0 )
self.assertEqual(lm_model.beta , 5.0 )
self.assertEqual(lm_model.unk_score_offset , -2_0.0 )
self.assertEqual(lm_model.score_boundary , lowercase__ )
def lowercase ( self : Dict ) -> Union[str, Any]:
__lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__lowerCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
__lowerCAmelCase = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__lowerCAmelCase = os.listdir(lowercase__ )
__lowerCAmelCase = ['alphabet.json', 'language_model']
downloaded_decoder_files.sort()
expected_decoder_files.sort()
# test that only decoder relevant files from
# https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main
# are downloaded and none of the rest (e.g. README.md, ...)
self.assertListEqual(lowercase__ , lowercase__ )
def lowercase ( self : Any ) -> int:
__lowerCAmelCase = snapshot_download('hf-internal-testing/processor_with_lm' )
__lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained(lowercase__ )
__lowerCAmelCase = processor.decoder.model_container[processor.decoder._model_key]
__lowerCAmelCase = Path(language_model._kenlm_model.path.decode('utf-8' ) ).parent.parent.absolute()
__lowerCAmelCase = os.listdir(lowercase__ )
__lowerCAmelCase = os.listdir(lowercase__ )
local_decoder_files.sort()
expected_decoder_files.sort()
# test that both decoder form hub and local files in cache are the same
self.assertListEqual(lowercase__ , lowercase__ )
def lowercase ( self : Optional[Any] ) -> List[str]:
__lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__lowerCAmelCase = AutoProcessor.from_pretrained('hf-internal-testing/processor_with_lm' )
__lowerCAmelCase = floats_list((3, 1_0_0_0) )
__lowerCAmelCase = processor_wavaveca(lowercase__ , return_tensors='np' )
__lowerCAmelCase = processor_auto(lowercase__ , return_tensors='np' )
for key in input_wavaveca.keys():
self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 )
__lowerCAmelCase = self._get_dummy_logits()
__lowerCAmelCase = processor_wavaveca.batch_decode(lowercase__ )
__lowerCAmelCase = processor_auto.batch_decode(lowercase__ )
self.assertListEqual(decoded_wavaveca.text , decoded_auto.text )
def lowercase ( self : List[Any] ) -> Tuple:
__lowerCAmelCase = self.get_feature_extractor()
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = self.get_decoder()
__lowerCAmelCase = WavaVecaProcessorWithLM(tokenizer=lowercase__ , feature_extractor=lowercase__ , decoder=lowercase__ )
self.assertListEqual(
processor.model_input_names , feature_extractor.model_input_names , msg='`processor` and `feature_extractor` model input names do not match' , )
@staticmethod
def lowercase ( lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : str ) -> Tuple:
__lowerCAmelCase = [d[key] for d in offsets]
return retrieved_list
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
__lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__lowerCAmelCase = self._get_dummy_logits()[0]
__lowerCAmelCase = processor.decode(lowercase__ , output_word_offsets=lowercase__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(lowercase__ , lowercase__ ) )
self.assertEqual(' '.join(self.get_from_offsets(outputs['word_offsets'] , 'word' ) ) , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'] , 'end_offset' ) , [1, 3, 5] )
def lowercase ( self : List[str] ) -> str:
__lowerCAmelCase = WavaVecaProcessorWithLM.from_pretrained('hf-internal-testing/processor_with_lm' )
__lowerCAmelCase = self._get_dummy_logits()
__lowerCAmelCase = processor.batch_decode(lowercase__ , output_word_offsets=lowercase__ )
# check Wav2Vec2CTCTokenizerOutput keys for word
self.assertEqual(len(outputs.keys() ) , 4 )
self.assertTrue('text' in outputs )
self.assertTrue('word_offsets' in outputs )
self.assertTrue(isinstance(lowercase__ , lowercase__ ) )
self.assertListEqual(
[' '.join(self.get_from_offsets(lowercase__ , 'word' ) ) for o in outputs['word_offsets']] , outputs.text )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'word' ) , ['<s>', '<s>', '</s>'] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'start_offset' ) , [0, 2, 4] )
self.assertListEqual(self.get_from_offsets(outputs['word_offsets'][0] , 'end_offset' ) , [1, 3, 5] )
@slow
@require_torch
@require_torchaudio
def lowercase ( self : Dict ) -> Any:
import torch
__lowerCAmelCase = load_dataset('common_voice' , 'en' , split='train' , streaming=lowercase__ )
__lowerCAmelCase = ds.cast_column('audio' , datasets.Audio(sampling_rate=1_6_0_0_0 ) )
__lowerCAmelCase = iter(lowercase__ )
__lowerCAmelCase = next(lowercase__ )
__lowerCAmelCase = AutoProcessor.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
__lowerCAmelCase = WavaVecaForCTC.from_pretrained('patrickvonplaten/wav2vec2-base-100h-with-lm' )
# compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train
__lowerCAmelCase = processor(sample['audio']['array'] , return_tensors='pt' ).input_values
with torch.no_grad():
__lowerCAmelCase = model(lowercase__ ).logits.cpu().numpy()
__lowerCAmelCase = processor.decode(logits[0] , output_word_offsets=lowercase__ )
__lowerCAmelCase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate
__lowerCAmelCase = [
{
'start_time': d['start_offset'] * time_offset,
'end_time': d['end_offset'] * time_offset,
'word': d['word'],
}
for d in output['word_offsets']
]
__lowerCAmelCase = 'WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL'
# output words
self.assertEqual(' '.join(self.get_from_offsets(lowercase__ , 'word' ) ) , lowercase__ )
self.assertEqual(' '.join(self.get_from_offsets(lowercase__ , 'word' ) ) , output.text )
# output times
__lowerCAmelCase = torch.tensor(self.get_from_offsets(lowercase__ , 'start_time' ) )
__lowerCAmelCase = torch.tensor(self.get_from_offsets(lowercase__ , 'end_time' ) )
# fmt: off
__lowerCAmelCase = torch.tensor([1.41_99, 1.65_99, 2.25_99, 3.0, 3.24, 3.59_99, 3.79_99, 4.09_99, 4.26, 4.94, 5.28, 5.65_99, 5.78, 5.94, 6.32, 6.53_99, 6.65_99] )
__lowerCAmelCase = torch.tensor([1.53_99, 1.89_99, 2.9, 3.16, 3.53_99, 3.72, 4.01_99, 4.17_99, 4.76, 5.15_99, 5.55_99, 5.69_99, 5.86, 6.19_99, 6.38, 6.61_99, 6.94] )
# fmt: on
self.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=0.01 ) )
self.assertTrue(torch.allclose(lowercase__ , lowercase__ , atol=0.01 ) )
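# Illustrative sketch (added; not part of the original sample): the slow test above turns
# character-frame offsets into seconds by multiplying with
# model.config.inputs_to_logits_ratio / sampling_rate. The ratio of 320 below is an assumed
# value chosen for the example, and the offsets are invented.
inputs_to_logits_ratio = 320
sampling_rate = 16_000
time_offset = inputs_to_logits_ratio / sampling_rate  # 0.02 s per logit frame

word_offsets = [{"word": "HELLO", "start_offset": 12, "end_offset": 21}]
print([
    {"word": d["word"],
     "start_time": d["start_offset"] * time_offset,
     "end_time": d["end_offset"] * time_offset}
    for d in word_offsets
])  # roughly [{'word': 'HELLO', 'start_time': 0.24, 'end_time': 0.42}]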
| 53 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_lowercase: Any = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase: str = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase: str = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
_lowercase: Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
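# Illustrative sketch (added; not part of the original sample): the try/except blocks above
# simply mean "only expose the torch / TF classes when that backend can be imported". The
# check below imitates that gating for an arbitrary optional dependency; the exported names
# are only examples.
import importlib.util

def backend_available(module_name):
    return importlib.util.find_spec(module_name) is not None

exported = ["SwinConfig"]
if backend_available("torch"):
    exported.append("SwinModel")
if backend_available("tensorflow"):
    exported.append("TFSwinModel")
print(exported)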
| 192 | 0 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
lowerCAmelCase : Optional[int] = logging.get_logger(__name__)
def lowercase (_A ):
"""simple docstring"""
if isinstance(_A , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(_A , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(_A ):
return [[videos]]
raise ValueError(f'Could not make batched video from {videos}' )
class UpperCamelCase__ ( SCREAMING_SNAKE_CASE_ ):
"""simple docstring"""
__magic_name__ = ["pixel_values"]
def __init__( self , snake_case__ = True , snake_case__ = None , snake_case__ = PILImageResampling.BILINEAR , snake_case__ = True , snake_case__ = None , snake_case__ = True , snake_case__ = 1 / 255 , snake_case__ = True , snake_case__ = True , snake_case__ = None , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ )
_lowerCAmelCase : Optional[Any] = size if size is not None else {'shortest_edge': 256}
_lowerCAmelCase : str = get_size_dict(snake_case__ , default_to_square=snake_case__ )
_lowerCAmelCase : Union[str, Any] = crop_size if crop_size is not None else {'height': 224, 'width': 224}
_lowerCAmelCase : Any = get_size_dict(snake_case__ , param_name='crop_size' )
_lowerCAmelCase : Tuple = do_resize
_lowerCAmelCase : int = size
_lowerCAmelCase : Optional[Any] = do_center_crop
_lowerCAmelCase : Tuple = crop_size
_lowerCAmelCase : Optional[int] = resample
_lowerCAmelCase : Optional[Any] = do_rescale
_lowerCAmelCase : Optional[Any] = rescale_factor
_lowerCAmelCase : str = offset
_lowerCAmelCase : Tuple = do_normalize
_lowerCAmelCase : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_lowerCAmelCase : Optional[int] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def a ( self , snake_case__ , snake_case__ , snake_case__ = PILImageResampling.BILINEAR , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : Any = get_size_dict(snake_case__ , default_to_square=snake_case__ )
if "shortest_edge" in size:
_lowerCAmelCase : List[Any] = get_resize_output_image_size(snake_case__ , size['shortest_edge'] , default_to_square=snake_case__ )
elif "height" in size and "width" in size:
_lowerCAmelCase : Union[str, Any] = (size['height'], size['width'])
else:
raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__ )
def a ( self , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : Any = get_size_dict(snake_case__ )
if "height" not in size or "width" not in size:
raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(snake_case__ , size=(size['height'], size['width']) , data_format=snake_case__ , **snake_case__ )
def a ( self , snake_case__ , snake_case__ , snake_case__ = True , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : int = image.astype(np.floataa )
if offset:
_lowerCAmelCase : Union[str, Any] = image - (scale / 2)
return rescale(snake_case__ , scale=snake_case__ , data_format=snake_case__ , **snake_case__ )
def a ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ = None , **snake_case__ , ):
'''simple docstring'''
return normalize(snake_case__ , mean=snake_case__ , std=snake_case__ , data_format=snake_case__ , **snake_case__ )
def a ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = ChannelDimension.FIRST , ):
'''simple docstring'''
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
if offset and not do_rescale:
raise ValueError('For offset, do_rescale must also be set to True.' )
# All transformations expect numpy arrays.
_lowerCAmelCase : Dict = to_numpy_array(snake_case__ )
if do_resize:
_lowerCAmelCase : Dict = self.resize(image=snake_case__ , size=snake_case__ , resample=snake_case__ )
if do_center_crop:
_lowerCAmelCase : Union[str, Any] = self.center_crop(snake_case__ , size=snake_case__ )
if do_rescale:
_lowerCAmelCase : Union[str, Any] = self.rescale(image=snake_case__ , scale=snake_case__ , offset=snake_case__ )
if do_normalize:
_lowerCAmelCase : int = self.normalize(image=snake_case__ , mean=snake_case__ , std=snake_case__ )
_lowerCAmelCase : List[str] = to_channel_dimension_format(snake_case__ , snake_case__ )
return image
def a ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = ChannelDimension.FIRST , **snake_case__ , ):
'''simple docstring'''
_lowerCAmelCase : Any = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase : Any = resample if resample is not None else self.resample
_lowerCAmelCase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase : str = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase : str = offset if offset is not None else self.offset
_lowerCAmelCase : Any = do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase : Union[str, Any] = image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase : List[str] = image_std if image_std is not None else self.image_std
_lowerCAmelCase : Tuple = size if size is not None else self.size
_lowerCAmelCase : Union[str, Any] = get_size_dict(snake_case__ , default_to_square=snake_case__ )
_lowerCAmelCase : str = crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase : List[str] = get_size_dict(snake_case__ , param_name='crop_size' )
if not valid_images(snake_case__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
_lowerCAmelCase : int = make_batched(snake_case__ )
_lowerCAmelCase : Optional[int] = [
[
self._preprocess_image(
image=snake_case__ , do_resize=snake_case__ , size=snake_case__ , resample=snake_case__ , do_center_crop=snake_case__ , crop_size=snake_case__ , do_rescale=snake_case__ , rescale_factor=snake_case__ , offset=snake_case__ , do_normalize=snake_case__ , image_mean=snake_case__ , image_std=snake_case__ , data_format=snake_case__ , )
for img in video
]
for video in videos
]
_lowerCAmelCase : Any = {'pixel_values': videos}
return BatchFeature(data=snake_case__ , tensor_type=snake_case__ )
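# Illustrative sketch (added; not part of the original sample): when offset is enabled, the
# rescale helper above subtracts scale / 2 from the float pixel values before the multiply
# by the rescale factor. The toy array below only traces that arithmetic.
import numpy as np

scale = 1 / 255
pixels = np.array([0.0, 127.5, 255.0], dtype=np.float32)
print((pixels - (scale / 2)) * scale)  # roughly [-7.7e-06, 0.5, 1.0]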
| 630 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
lowerCAmelCase : int = {
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Optional[Any] = [
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
lowerCAmelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 630 | 1 |