| code | code_codestyle | style_context | style_context_codestyle | label |
| --- | --- | --- | --- | --- |
| stringlengths 82–53.2k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
"""Project Euler Problem 115: https://projecteuler.net/problem=115"""
from itertools import count


def solution(min_block_length: int = 50) -> int:
    """Return the least row length n for which the number of ways to fill a
    row with blocks of length >= min_block_length first exceeds one million."""
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n


if __name__ == "__main__":
    print(f"{solution() = }")
"""simple docstring"""
import logging
import os
from .state import PartialState
class lowercase ( logging.LoggerAdapter ):
@staticmethod
def _snake_case ( lowercase ) -> Optional[Any]:
lowerCAmelCase = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def _snake_case ( self , lowercase , lowercase , *lowercase , **lowercase ) -> Dict:
if PartialState._shared_state == {}:
raise RuntimeError(
"""You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.""" )
lowerCAmelCase = kwargs.pop("""main_process_only""" , lowercase )
lowerCAmelCase = kwargs.pop("""in_order""" , lowercase )
if self.isEnabledFor(lowercase ):
if self._should_log(lowercase ):
lowerCAmelCase , lowerCAmelCase = self.process(lowercase , lowercase )
self.logger.log(lowercase , lowercase , *lowercase , **lowercase )
elif in_order:
lowerCAmelCase = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
lowerCAmelCase , lowerCAmelCase = self.process(lowercase , lowercase )
self.logger.log(lowercase , lowercase , *lowercase , **lowercase )
state.wait_for_everyone()
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str = None ):
'''simple docstring'''
if log_level is None:
lowerCAmelCase = os.environ.get("""ACCELERATE_LOG_LEVEL""" , SCREAMING_SNAKE_CASE )
lowerCAmelCase = logging.getLogger(SCREAMING_SNAKE_CASE )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(SCREAMING_SNAKE_CASE , {} )
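# Usage sketch (not from the original file): assumes a script running in an
# initialized accelerate environment, so the shared state the adapter checks
# for exists.
from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()  # sets up PartialState._shared_state
logger = get_logger(__name__, log_level="INFO")
logger.info("logged once, from the main process only")
logger.info("logged by every rank, in order", main_process_only=False, in_order=True)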
"""simple docstring"""
import warnings
from functools import wraps
from typing import Callable
def A( snake_case_ ):
"""simple docstring"""
@wraps(snake_case_ )
def _inner_fn(*snake_case_ , **snake_case_ ):
warnings.warn(
(F"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , snake_case_ , )
return fn(*snake_case_ , **snake_case_ )
return _inner_fn
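# Usage sketch (not from the original file; `new_feature` is a hypothetical
# function used only to demonstrate the decorator):
@experimental
def new_feature(x: int) -> int:
    return x * 2


new_feature(3)  # emits UserWarning: 'new_feature' is experimental ...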
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def A( snake_case_ , snake_case_ , snake_case_ = 1 / sqrt(2 ) ):
"""simple docstring"""
lowercase__: Dict = tau * frequency / samplerate
lowercase__: List[str] = sin(snake_case_ )
lowercase__: Union[str, Any] = cos(snake_case_ )
lowercase__: Optional[Any] = _sin / (2 * q_factor)
lowercase__: int = (1 - _cos) / 2
lowercase__: Tuple = 1 - _cos
lowercase__: List[Any] = 1 + alpha
lowercase__: Any = -2 * _cos
lowercase__: Dict = 1 - alpha
lowercase__: Union[str, Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def A( snake_case_ , snake_case_ , snake_case_ = 1 / sqrt(2 ) ):
"""simple docstring"""
lowercase__: str = tau * frequency / samplerate
lowercase__: Dict = sin(snake_case_ )
lowercase__: Dict = cos(snake_case_ )
lowercase__: Tuple = _sin / (2 * q_factor)
lowercase__: Any = (1 + _cos) / 2
lowercase__: str = -1 - _cos
lowercase__: Any = 1 + alpha
lowercase__: List[str] = -2 * _cos
lowercase__: Optional[Any] = 1 - alpha
lowercase__: Optional[Any] = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def A( snake_case_ , snake_case_ , snake_case_ = 1 / sqrt(2 ) ):
"""simple docstring"""
lowercase__: List[Any] = tau * frequency / samplerate
lowercase__: Optional[int] = sin(snake_case_ )
lowercase__: List[Any] = cos(snake_case_ )
lowercase__: Any = _sin / (2 * q_factor)
lowercase__: Any = _sin / 2
lowercase__: Optional[Any] = 0
lowercase__: Any = -ba
lowercase__: Optional[int] = 1 + alpha
lowercase__: Any = -2 * _cos
lowercase__: Any = 1 - alpha
lowercase__: Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def A( snake_case_ , snake_case_ , snake_case_ = 1 / sqrt(2 ) ):
"""simple docstring"""
lowercase__: List[str] = tau * frequency / samplerate
lowercase__: Tuple = sin(snake_case_ )
lowercase__: List[str] = cos(snake_case_ )
lowercase__: Union[str, Any] = _sin / (2 * q_factor)
lowercase__: List[str] = 1 - alpha
lowercase__: Optional[Any] = -2 * _cos
lowercase__: str = 1 + alpha
lowercase__: List[Any] = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def A( snake_case_ , snake_case_ , snake_case_ , snake_case_ = 1 / sqrt(2 ) , ):
"""simple docstring"""
lowercase__: Tuple = tau * frequency / samplerate
lowercase__: Tuple = sin(snake_case_ )
lowercase__: Optional[Any] = cos(snake_case_ )
lowercase__: str = _sin / (2 * q_factor)
lowercase__: Optional[Any] = 10 ** (gain_db / 40)
lowercase__: Union[str, Any] = 1 + alpha * big_a
lowercase__: str = -2 * _cos
lowercase__: Tuple = 1 - alpha * big_a
lowercase__: Union[str, Any] = 1 + alpha / big_a
lowercase__: Dict = -2 * _cos
lowercase__: Optional[Any] = 1 - alpha / big_a
lowercase__: Dict = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def A( snake_case_ , snake_case_ , snake_case_ , snake_case_ = 1 / sqrt(2 ) , ):
"""simple docstring"""
lowercase__: Optional[Any] = tau * frequency / samplerate
lowercase__: Union[str, Any] = sin(snake_case_ )
lowercase__: Optional[Any] = cos(snake_case_ )
lowercase__: Optional[int] = _sin / (2 * q_factor)
lowercase__: Optional[int] = 10 ** (gain_db / 40)
lowercase__: List[Any] = (big_a + 1) - (big_a - 1) * _cos
lowercase__: Union[str, Any] = (big_a + 1) + (big_a - 1) * _cos
lowercase__: Any = (big_a - 1) - (big_a + 1) * _cos
lowercase__: str = (big_a - 1) + (big_a + 1) * _cos
lowercase__: int = 2 * sqrt(snake_case_ ) * alpha
lowercase__: Union[str, Any] = big_a * (pmc + aaa)
lowercase__: List[Any] = 2 * big_a * mpc
lowercase__: Dict = big_a * (pmc - aaa)
lowercase__: Dict = ppmc + aaa
lowercase__: List[str] = -2 * pmpc
lowercase__: int = ppmc - aaa
lowercase__: Any = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def A( snake_case_ , snake_case_ , snake_case_ , snake_case_ = 1 / sqrt(2 ) , ):
"""simple docstring"""
lowercase__: List[str] = tau * frequency / samplerate
lowercase__: Dict = sin(snake_case_ )
lowercase__: Optional[Any] = cos(snake_case_ )
lowercase__: Tuple = _sin / (2 * q_factor)
lowercase__: int = 10 ** (gain_db / 40)
lowercase__: Dict = (big_a + 1) - (big_a - 1) * _cos
lowercase__: Optional[int] = (big_a + 1) + (big_a - 1) * _cos
lowercase__: Tuple = (big_a - 1) - (big_a + 1) * _cos
lowercase__: Dict = (big_a - 1) + (big_a + 1) * _cos
lowercase__: Dict = 2 * sqrt(snake_case_ ) * alpha
lowercase__: Optional[int] = big_a * (ppmc + aaa)
lowercase__: Dict = -2 * big_a * pmpc
lowercase__: Dict = big_a * (ppmc - aaa)
lowercase__: Tuple = pmc + aaa
lowercase__: Optional[int] = 2 * mpc
lowercase__: Union[str, Any] = pmc - aaa
lowercase__: Tuple = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
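# Usage sketch (not from the original file): assumes the sample-by-sample
# `IIRFilter.process` API from the same audio_filters package.
filt = make_lowpass(frequency=1_000, samplerate=48_000)
filtered = [filt.process(sample) for sample in (0.0, 0.5, 1.0, 0.5, 0.0)]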
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
_lowercase = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
_lowerCamelCase: str
_lowerCamelCase: List[str]
_lowerCamelCase: Optional[List[str]]
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
_lowerCamelCase: List[int]
_lowerCamelCase: List[int]
_lowerCamelCase: Optional[List[int]] = None
_lowerCamelCase: Optional[List[int]] = None
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = '''train'''
_lowerCamelCase: Any = '''dev'''
_lowerCamelCase: Optional[Any] = '''test'''
class lowerCAmelCase_ :
'''simple docstring'''
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : Dict ,A_ : Union[Split, str] ) -> List[InputExample]:
raise NotImplementedError
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : str ) -> List[str]:
raise NotImplementedError
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : List[InputExample] ,A_ : List[str] ,A_ : int ,A_ : PreTrainedTokenizer ,A_ : Optional[int]=False ,A_ : Any="[CLS]" ,A_ : Optional[Any]=1 ,A_ : List[Any]="[SEP]" ,A_ : List[str]=False ,A_ : Union[str, Any]=False ,A_ : List[Any]=0 ,A_ : Optional[Any]=0 ,A_ : str=-100 ,A_ : Union[str, Any]=0 ,A_ : int=True ,) -> List[InputFeatures]:
A = {label: i for i, label in enumerate(A_ )}
A = []
for ex_index, example in enumerate(A_ ):
if ex_index % 1_0000 == 0:
logger.info('Writing example %d of %d' ,A_ ,len(A_ ) )
A = []
A = []
for word, label in zip(example.words ,example.labels ):
A = tokenizer.tokenize(A_ )
# bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
if len(A_ ) > 0:
tokens.extend(A_ )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(A_ ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
A = tokenizer.num_special_tokens_to_add()
if len(A_ ) > max_seq_length - special_tokens_count:
A = tokens[: (max_seq_length - special_tokens_count)]
A = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
A = [sequence_a_segment_id] * len(A_ )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
A = [cls_token] + tokens
A = [pad_token_label_id] + label_ids
A = [cls_token_segment_id] + segment_ids
A = tokenizer.convert_tokens_to_ids(A_ )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
A = [1 if mask_padding_with_zero else 0] * len(A_ )
# Zero-pad up to the sequence length.
A = max_seq_length - len(A_ )
if pad_on_left:
A = ([pad_token] * padding_length) + input_ids
A = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
A = ([pad_token_segment_id] * padding_length) + segment_ids
A = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(A_ ) == max_seq_length
assert len(A_ ) == max_seq_length
assert len(A_ ) == max_seq_length
assert len(A_ ) == max_seq_length
if ex_index < 5:
logger.info('*** Example ***' )
logger.info('guid: %s' ,example.guid )
logger.info('tokens: %s' ,' '.join([str(A_ ) for x in tokens] ) )
logger.info('input_ids: %s' ,' '.join([str(A_ ) for x in input_ids] ) )
logger.info('input_mask: %s' ,' '.join([str(A_ ) for x in input_mask] ) )
logger.info('segment_ids: %s' ,' '.join([str(A_ ) for x in segment_ids] ) )
logger.info('label_ids: %s' ,' '.join([str(A_ ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
A = None
features.append(
InputFeatures(
input_ids=A_ ,attention_mask=A_ ,token_type_ids=A_ ,label_ids=A_ ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: List[InputFeatures]
_lowerCamelCase: int = nn.CrossEntropyLoss().ignore_index
def __init__( self : Tuple ,A_ : TokenClassificationTask ,A_ : str ,A_ : PreTrainedTokenizer ,A_ : List[str] ,A_ : str ,A_ : Optional[int] = None ,A_ : Optional[Any]=False ,A_ : Split = Split.train ,) -> Union[str, Any]:
# Load data features from cache or dataset file
A = os.path.join(
A_ ,'cached_{}_{}_{}'.format(mode.value ,tokenizer.__class__.__name__ ,str(A_ ) ) ,)
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
A = cached_features_file + '.lock'
with FileLock(A_ ):
if os.path.exists(A_ ) and not overwrite_cache:
logger.info(F'Loading features from cached file {cached_features_file}' )
A = torch.load(A_ )
else:
logger.info(F'Creating features from dataset file at {data_dir}' )
A = token_classification_task.read_examples_from_file(A_ ,A_ )
# TODO clean up all this to leverage built-in features of tokenizers
A = token_classification_task.convert_examples_to_features(
A_ ,A_ ,A_ ,A_ ,cls_token_at_end=bool(model_type in ['xlnet'] ) ,cls_token=tokenizer.cls_token ,cls_token_segment_id=2 if model_type in ['xlnet'] else 0 ,sep_token=tokenizer.sep_token ,sep_token_extra=A_ ,pad_on_left=bool(tokenizer.padding_side == 'left' ) ,pad_token=tokenizer.pad_token_id ,pad_token_segment_id=tokenizer.pad_token_type_id ,pad_token_label_id=self.pad_token_label_id ,)
logger.info(F'Saving features into cached file {cached_features_file}' )
torch.save(self.features ,A_ )
def __len__( self : List[str] ) -> str:
return len(self.features )
def __getitem__( self : List[str] ,A_ : int ) -> InputFeatures:
return self.features[i]
if is_tf_available():
import tensorflow as tf
class lowerCAmelCase_ :
'''simple docstring'''
_lowerCamelCase: List[InputFeatures]
_lowerCamelCase: int = -100
def __init__( self : List[Any] ,A_ : TokenClassificationTask ,A_ : str ,A_ : PreTrainedTokenizer ,A_ : List[str] ,A_ : str ,A_ : Optional[int] = None ,A_ : Union[str, Any]=False ,A_ : Split = Split.train ,) -> Union[str, Any]:
A = token_classification_task.read_examples_from_file(A_ ,A_ )
# TODO clean up all this to leverage built-in features of tokenizers
A = token_classification_task.convert_examples_to_features(
A_ ,A_ ,A_ ,A_ ,cls_token_at_end=bool(model_type in ['xlnet'] ) ,cls_token=tokenizer.cls_token ,cls_token_segment_id=2 if model_type in ['xlnet'] else 0 ,sep_token=tokenizer.sep_token ,sep_token_extra=A_ ,pad_on_left=bool(tokenizer.padding_side == 'left' ) ,pad_token=tokenizer.pad_token_id ,pad_token_segment_id=tokenizer.pad_token_type_id ,pad_token_label_id=self.pad_token_label_id ,)
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
A = tf.data.Dataset.from_generator(
A_ ,({'input_ids': tf.intaa, 'attention_mask': tf.intaa}, tf.intaa) ,(
{'input_ids': tf.TensorShape([None] ), 'attention_mask': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) ,)
else:
A = tf.data.Dataset.from_generator(
A_ ,({'input_ids': tf.intaa, 'attention_mask': tf.intaa, 'token_type_ids': tf.intaa}, tf.intaa) ,(
{
'input_ids': tf.TensorShape([None] ),
'attention_mask': tf.TensorShape([None] ),
'token_type_ids': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) ,)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
A = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : Optional[int] ) -> Optional[Any]:
return len(self.features )
def __getitem__( self : Optional[Any] ,A_ : Union[str, Any] ) -> InputFeatures:
return self.features[i] | 91 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowercase__ ( A ):
'''simple docstring'''
def __init__( self , snake_case , snake_case ) -> Any:
super().__init__()
self.register_modules(unet=snake_case , scheduler=snake_case )
@torch.no_grad()
def __call__( self , snake_case = 1 , snake_case = 100 , snake_case = None , snake_case = None , snake_case = True , ) -> Union[AudioPipelineOutput, Tuple]:
if audio_length_in_s is None:
_UpperCAmelCase = self.unet.config.sample_size / self.unet.config.sample_rate
_UpperCAmelCase = audio_length_in_s * self.unet.config.sample_rate
_UpperCAmelCase = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f'{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'
f' {3 * down_scale_factor / self.unet.config.sample_rate}.' )
_UpperCAmelCase = int(snake_case )
if sample_size % down_scale_factor != 0:
_UpperCAmelCase = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'
f' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'
' process.' )
_UpperCAmelCase = int(snake_case )
_UpperCAmelCase = next(iter(self.unet.parameters() ) ).dtype
_UpperCAmelCase = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(snake_case , snake_case ) and len(snake_case ) != batch_size:
raise ValueError(
f'You have passed a list of generators of length {len(snake_case )}, but requested an effective batch'
f' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
_UpperCAmelCase = randn_tensor(snake_case , generator=snake_case , device=self.device , dtype=snake_case )
# set step values
self.scheduler.set_timesteps(snake_case , device=audio.device )
_UpperCAmelCase = self.scheduler.timesteps.to(snake_case )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
_UpperCAmelCase = self.unet(snake_case , snake_case ).sample
# 2. compute previous image: x_t -> t_t-1
_UpperCAmelCase = self.scheduler.step(snake_case , snake_case , snake_case ).prev_sample
_UpperCAmelCase = audio.clamp(-1 , 1 ).float().cpu().numpy()
_UpperCAmelCase = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=snake_case )
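# Usage sketch (not from the original file): the checkpoint id is an
# assumption taken from the Dance Diffusion docs.
from diffusers import DanceDiffusionPipeline

pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
output = pipe(audio_length_in_s=4.0, num_inference_steps=100)
audio = output.audios[0]  # numpy array of shape (channels, samples)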
"""Count the ways to write a number as a sum of distinct natural-number powers
(backtracking)."""
from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    if not (1 <= needed_sum <= 1_000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )
    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
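# Usage sketch (not from the original file): 13 has exactly one representation
# as a sum of distinct squares, 2**2 + 3**2.
print(solve(13, 2))  # 1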
"""Quine-McCluskey minimization of boolean functions."""
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Return the merged implicant if the strings differ in at most one
    position (the differing position becomes '_'), else False."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge implicants until only prime implicants remain."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                # merge only when the two implicants differ in at most one bit
                if k is not False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    # first pick implicants that are the only cover for some minterm
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # then greedily pick the implicant covering the most remaining minterms
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
"""SegFormer model configuration (cf. transformers)."""
import warnings
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "nvidia/segformer-b0-finetuned-ade-512-512": (
        "https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json"
    ),
    # See all SegFormer models at https://huggingface.co/models?filter=segformer
}


class SegformerConfig(PretrainedConfig):
    model_type = "segformer"

    def __init__(
        self,
        num_channels=3,
        num_encoder_blocks=4,
        depths=[2, 2, 2, 2],
        sr_ratios=[8, 4, 2, 1],
        hidden_sizes=[32, 64, 160, 256],
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        num_attention_heads=[1, 2, 5, 8],
        mlp_ratios=[4, 4, 4, 4],
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        drop_path_rate=0.1,
        layer_norm_eps=1e-6,
        decoder_hidden_size=256,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
            warnings.warn(
                "Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be"
                " removed, as the behaviour will default to that of reshape_last_stage = True.",
                FutureWarning,
            )

        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.depths = depths
        self.sr_ratios = sr_ratios
        self.hidden_sizes = hidden_sizes
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.mlp_ratios = mlp_ratios
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.drop_path_rate = drop_path_rate
        self.layer_norm_eps = layer_norm_eps
        self.decoder_hidden_size = decoder_hidden_size
        self.reshape_last_stage = kwargs.get("reshape_last_stage", True)
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class SegformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
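# Usage sketch (not from the original file): `num_labels=150` is an assumption
# matching the ADE20K label count.
from transformers import SegformerConfig, SegformerForSemanticSegmentation

config = SegformerConfig(num_labels=150)
model = SegformerForSemanticSegmentation(config)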
"""Pearson correlation coefficient metric (cf. the `datasets` library)."""
from scipy.stats import pearsonr

import datasets


_DESCRIPTION = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
_KWARGS_DESCRIPTION = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
 p-value (`float`): The p-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
_CITATION = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Pearsonr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        if return_pvalue:
            results = pearsonr(references, predictions)
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references, predictions)[0])}
"""simple docstring"""
def __lowerCAmelCase( __UpperCAmelCase ,__UpperCAmelCase ):
"""simple docstring"""
if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ):
raise ValueError('iterations must be defined as integers' )
if not isinstance(__UpperCAmelCase ,__UpperCAmelCase ) or not number >= 1:
raise ValueError(
'starting number must be\n and integer and be more than 0' )
if not iterations >= 1:
raise ValueError('Iterations must be done more than 0 times to play FizzBuzz' )
_lowercase : List[str] = ''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(__UpperCAmelCase )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
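# Usage sketch (not from the original file):
print(fizz_buzz(1, 7))  # '1 2 Fizz 4 Buzz Fizz 7 '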
| 283 | """simple docstring"""
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class _lowerCamelCase (__lowerCamelCase ):
_snake_case = ""
_snake_case = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
_snake_case = None # compression type in fsspec. ex: "gzip"
_snake_case = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : str , lowerCamelCase_ : str = "" , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : Optional[dict] = None , **lowerCamelCase_ : List[str] ):
"""simple docstring"""
super().__init__(self , **lowerCamelCase_ )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
_lowercase : Union[str, Any] = fsspec.open(
lowerCamelCase_ , mode='rb' , protocol=lowerCamelCase_ , compression=self.compression , client_kwargs={
'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459
'trust_env': True, # Enable reading proxy env variables.
**(target_options or {}).pop('client_kwargs' , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
_lowercase : str = os.path.basename(self.file.path.split('::' )[0] )
_lowercase : Optional[int] = (
self.compressed_name[: self.compressed_name.rindex('.' )]
if '.' in self.compressed_name
else self.compressed_name
)
_lowercase : str = None
@classmethod
def __UpperCAmelCase ( cls : int , lowerCamelCase_ : List[str] ):
"""simple docstring"""
return super()._strip_protocol(lowerCamelCase_ ).lstrip('/' )
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
if self.dir_cache is None:
_lowercase : Tuple = {**self.file.fs.info(self.file.path ), 'name': self.uncompressed_name}
_lowercase : int = {f['name']: f}
def __UpperCAmelCase ( self : List[str] , lowerCamelCase_ : str ):
"""simple docstring"""
return self.file.open().read()
def __UpperCAmelCase ( self : Dict , lowerCamelCase_ : str , lowerCamelCase_ : str = "rb" , lowerCamelCase_ : str=None , lowerCamelCase_ : str=True , lowerCamelCase_ : Union[str, Any]=None , **lowerCamelCase_ : Optional[int] , ):
"""simple docstring"""
_lowercase : Union[str, Any] = self._strip_protocol(lowerCamelCase_ )
if mode != "rb":
raise ValueError(F'''Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'''' )
return self.file.open()
class _lowerCamelCase (__lowerCamelCase ):
_snake_case = "bz2"
_snake_case = "bz2"
_snake_case = ".bz2"
class _lowerCamelCase (__lowerCamelCase ):
_snake_case = "gzip"
_snake_case = "gzip"
_snake_case = ".gz"
class _lowerCamelCase (__lowerCamelCase ):
_snake_case = "lz4"
_snake_case = "lz4"
_snake_case = ".lz4"
class _lowerCamelCase (__lowerCamelCase ):
_snake_case = "xz"
_snake_case = "xz"
_snake_case = ".xz"
class _lowerCamelCase (__lowerCamelCase ):
_snake_case = "zstd"
_snake_case = "zstd"
_snake_case = ".zst"
def __init__( self : Union[str, Any] , lowerCamelCase_ : str , lowerCamelCase_ : str = "rb" , lowerCamelCase_ : Optional[str] = None , lowerCamelCase_ : Optional[dict] = None , lowerCamelCase_ : int = DEFAULT_BLOCK_SIZE , **lowerCamelCase_ : int , ):
"""simple docstring"""
super().__init__(
fo=lowerCamelCase_ , mode=lowerCamelCase_ , target_protocol=lowerCamelCase_ , target_options=lowerCamelCase_ , block_size=lowerCamelCase_ , **lowerCamelCase_ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
_lowercase : Any = self.file.__enter__
class _lowerCamelCase :
def __init__( self : Any , lowerCamelCase_ : List[Any] ):
"""simple docstring"""
_lowercase : Tuple = file_
def __enter__( self : str ):
"""simple docstring"""
self._file.__enter__()
return self
def __exit__( self : Union[str, Any] , *lowerCamelCase_ : Optional[Any] , **lowerCamelCase_ : Union[str, Any] ):
"""simple docstring"""
self._file.__exit__(*lowerCamelCase_ , **lowerCamelCase_ )
def __iter__( self : Optional[int] ):
"""simple docstring"""
return iter(self._file )
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
return next(self._file )
def __getattr__( self : List[str] , lowerCamelCase_ : str ):
"""simple docstring"""
return getattr(self._file , lowerCamelCase_ )
def fixed_enter(*lowerCamelCase_ : List[Any] , **lowerCamelCase_ : int ):
return WrappedFile(_enter(*lowerCamelCase_ , **lowerCamelCase_ ) )
_lowercase : Optional[int] = fixed_enter
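# Usage sketch (not from the original file): assumes these filesystems are
# registered with fsspec (as `datasets` does on import); the archive path is
# hypothetical.
import fsspec

with fsspec.open("gzip://file.txt::./archive.txt.gz", "rt") as f:
    text = f.read()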
"""Sanity-check that repository file paths are lowercase, space-free,
hyphen-free, and live inside a directory."""
import os

try:
    from .build_directory_md import good_file_paths
except ImportError:
    from build_directory_md import good_file_paths  # type: ignore

filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
    import sys

    sys.exit(bad_files)
"""T5 tokenizer based on SentencePiece (cf. transformers)."""
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import PreTrainedTokenizer


if TYPE_CHECKING:
    from ...tokenization_utils_base import TextInput
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
        "t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
        "t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
        "t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
        "t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
    }
}

# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "t5-small": 512,
    "t5-base": 512,
    "t5-large": 512,
    "t5-3b": 512,
    "t5-11b": 512,
}

SPIECE_UNDERLINE = "▁"


class T5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )

        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self._extra_ids = extra_ids

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in T5Tokenizer.max_model_input_sizes:
            deprecated_max_model_length = T5Tokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )

        return max_model_length

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda x: bool(re.search(r"<extra_id_\d+>", x)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]

    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]

        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def tokenize(self, text: "TextInput", **kwargs) -> List[str]:
        # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at
        # the beginning of the text
        if not self.legacy:
            text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, " ")
        return super().tokenize(text, **kwargs)

    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]

        tokens = self.sp_model.encode(text, out_type=str)

        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
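# Usage sketch (not from the original file): downloads the `t5-small`
# sentencepiece vocabulary on first use.
from transformers import T5Tokenizer

tokenizer = T5Tokenizer.from_pretrained("t5-small")
ids = tokenizer("translate English to German: Hello.").input_ids
print(tokenizer.decode(ids))  # 'translate English to German: Hello.</s>'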
def check_cycle(graph: dict) -> bool:
    """Return True if the directed graph contains a cycle."""
    # keep track of all the visited nodes
    visited: set[int] = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set[int] = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """DFS that reports a back edge (i.e. a cycle) reachable from `vertex`."""
    # Mark the current node as visited and add it to the recursion stack
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
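# Usage sketch (not from the original file): 0 -> 1 -> 2 -> 0 is a cycle,
# while the second graph is acyclic.
print(check_cycle({0: [1], 1: [2], 2: [0]}))    # True
print(check_cycle({0: [1, 2], 1: [2], 2: []}))  # False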
"""1-D UNet model (cf. diffusers' UNet1DModel)."""
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    """`sample`: the hidden states output from the last layer of the model."""

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        sample_size: int = 65_536,
        sample_rate: Optional[int] = None,
        in_channels: int = 2,
        out_channels: int = 2,
        extra_in_channels: int = 0,
        time_embedding_type: str = "fourier",
        flip_sin_to_cos: bool = True,
        use_timestep_embedding: bool = False,
        freq_shift: float = 0.0,
        down_block_types: Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types: Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type: str = "UNetMidBlock1D",
        out_block_type: str = None,
        block_out_channels: Tuple[int] = (32, 32, 64),
        act_fn: str = None,
        norm_num_groups: int = 8,
        layers_per_block: int = 1,
        downsample_each_block: bool = False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim,
                time_embed_dim=time_embed_dim,
                act_fn=act_fn,
                out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]

            if i == 0:
                input_channel += extra_in_channels

            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block,
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            in_channels=block_out_channels[-1],
            mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1],
            embed_dim=block_out_channels[0],
            num_layers=layers_per_block,
            add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]

        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )

            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(
                up_block_type,
                num_layers=layers_per_block,
                in_channels=prev_output_channel,
                out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type,
            num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0],
            out_channels=out_channels,
            act_fn=act_fn,
            fc_dim=block_out_channels[-1] // 4,
        )

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        return_dict: bool = True,
    ) -> Union[UNet1DOutput, Tuple]:
        # 1. time
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
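# Shape-level usage sketch (not from the original file): with the default
# config the model should map a (batch, channels, length) sample to the same
# shape; exact block behavior depends on unet_1d_blocks.
model = UNet1DModel(in_channels=2, out_channels=2)
sample = torch.randn(1, 2, 65_536)
denoised = model(sample, timestep=10).sample  # torch.Size([1, 2, 65536])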
'''simple docstring'''
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
__A =2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
__A ={
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.1_5},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
# this remaps the different models to their organization names
__A ={}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__A ='facebook'
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    da = dict(
        (re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items()
    )
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del da[f"{k}</w>"]
        da[k] = d[k]  # restore
    return da
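# A quick sanity illustration of the mapping (hypothetical toy vocab; note the
# four special tokens must be present, or the restore loop raises KeyError):
#   rewrite_dict_keys({"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "tt@@": 6, "er": 7})
#   -> {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le": 5, "tt": 6, "er</w>": 7}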
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
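# Once converted, the dump loads like any published FSMT checkpoint; a usage
# sketch (the model id below is the already-published conversion of one of
# these checkpoints; a local pytorch_dump_folder_path works the same way):
#
#   from transformers import FSMTForConditionalGeneration, FSMTTokenizer
#
#   mname = "facebook/wmt19-ru-en"
#   tokenizer = FSMTTokenizer.from_pretrained(mname)
#   model = FSMTForConditionalGeneration.from_pretrained(mname)
#   inputs = tokenizer("Машинное обучение - это здорово", return_tensors="pt")
#   outputs = model.generate(**inputs, num_beams=5)
#   print(tokenizer.decode(outputs[0], skip_special_tokens=True))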
"""simple docstring"""
from collections import defaultdict
from math import ceil, sqrt
def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    """
    Count the values of n <= t_limit that can tile between 1 and n_limit
    distinct hollow square laminae (Project Euler problem 174).
    """
    count: defaultdict = defaultdict(int)
    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
if __name__ == "__main__":
print(f'''{solution() = }''')
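# A brute-force cross-check I find useful for this kind of counting code (my
# own helper, not part of the original solution): enumerate laminae directly
# and compare with solution() for a small t_limit.
def _brute_force(t_limit: int = 1000, n_limit: int = 10) -> int:
    tiles: defaultdict = defaultdict(int)
    for outer in range(3, t_limit):
        # hole width shares the outer width's parity; tiles grow as it shrinks
        for hole in range(outer - 2, 0, -2):
            tiles_used = outer * outer - hole * hole
            if tiles_used > t_limit:
                break
            tiles[tiles_used] += 1
    return sum(1 for c in tiles.values() if 1 <= c <= n_limit)

# _brute_force(1000) == solution(1000) should hold.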
"""simple docstring"""
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class MgpstrTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """simple docstring"""

    tokenizer_class = MgpstrTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {}
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # fmt: off
        vocab = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")

    def get_tokenizer(self, **kwargs):
        return MgpstrTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "tester"
        output_text = "tester"
        return input_text, output_text

    @unittest.skip("MGP-STR always lower cases letters.")
    def test_added_tokens_do_lower_case(self):
        pass

    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                special_token = "[SPECIAL_TOKEN]"

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode([special_token], add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                decoded = tokenizer.decode(encoded_special_token, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)

    def test_internal_consistency(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, output_text = self.get_input_output_texts(tokenizer)

                tokens = tokenizer.tokenize(input_text)
                ids = tokenizer.convert_tokens_to_ids(tokens)
                ids_2 = tokenizer.encode(input_text, add_special_tokens=False)
                self.assertListEqual(ids, ids_2)

                tokens_2 = tokenizer.convert_ids_to_tokens(ids)
                self.assertNotEqual(len(tokens_2), 0)
                text_2 = tokenizer.decode(ids)
                self.assertIsInstance(text_2, str)

                self.assertEqual(text_2.replace(" ", ""), output_text)

    @unittest.skip("MGP-STR tokenizer only handles one sequence.")
    def test_maximum_encoding_length_pair_input(self):
        pass

    @unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer")
    def test_pretokenized_inputs(self):
        pass
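# For reference, the tokenizer under test is character-level; a standalone
# usage sketch against the published checkpoint (assuming Hub access):
#
#   from transformers import MgpstrTokenizer
#
#   tokenizer = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")
#   ids = tokenizer("tester")["input_ids"]  # one id per character
#   tokenizer.decode(ids)                   # -> 'tester'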
'''simple docstring'''
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode:
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None


CoinsDistribResult = namedtuple("CoinsDistribResult", "moves excess")


def distribute_coin(root: TreeNode | None) -> int:
    """
    Return the minimum number of moves needed so that every node of the tree
    holds exactly one coin; each move transfers one coin along an edge.
    """
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left) + count_nodes(node.right) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left) + count_coins(node.right) + node.data

    if count_nodes(root) != count_coins(root):
        raise ValueError("The nodes number should be same as the number of coins")

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0, 1)

        left_distrib_moves, left_distrib_excess = get_distrib(node.left)
        right_distrib_moves, right_distrib_excess = get_distrib(node.right)

        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess

        coins_to_move = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left)
            + abs(coins_to_right)
        )
        excess = node.data - coins_to_left - coins_to_right

        return CoinsDistribResult(coins_to_move, excess)

    return get_distrib(root)[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
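# Usage demo (my own, using the names restored above): the root holds all
# three coins and must pass one to each child, so two moves are needed.
if __name__ == "__main__":
    demo_root = TreeNode(3, TreeNode(0), TreeNode(0))
    print(distribute_coin(demo_root))  # 2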
'''simple docstring'''
from sklearn.metrics import matthews_corrcoef
import datasets
__UpperCamelCase = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
__UpperCamelCase = "\nArgs:\n predictions (list of int): Predicted labels, as returned by a model.\n references (list of int): Ground truth labels.\n sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n Example 1, a basic example with only predictions and references as inputs:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3])\n >>> print(round(results['matthews_correlation'], 2))\n 0.54\n\n Example 2, the same example as above, but also including sample weights:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 3, 1, 1, 1, 2])\n >>> print(round(results['matthews_correlation'], 2))\n 0.1\n\n Example 3, the same example as above, but with sample weights that cause a negative correlation:\n >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n ... predictions=[1, 2, 2, 0, 3, 3],\n ... sample_weight=[0.5, 1, 0, 0, 0, 1])\n >>> print(round(results['matthews_correlation'], 2))\n -0.25\n"
__UpperCamelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
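# For intuition: on binary labels, MCC has a closed form over the confusion
# matrix; sklearn's matthews_corrcoef (used above) generalizes this to the
# multiclass case. A plain-Python sketch of the binary formula (my own, for
# illustration only):
def _binary_mcc(references: list, predictions: list) -> float:
    from math import sqrt

    tp = sum(r == p == 1 for r, p in zip(references, predictions))
    tn = sum(r == p == 0 for r, p in zip(references, predictions))
    fp = sum(r == 0 and p == 1 for r, p in zip(references, predictions))
    fn = sum(r == 1 and p == 0 for r, p in zip(references, predictions))
    denom = sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
    return 0.0 if denom == 0 else (tp * tn - fp * fn) / denom

# _binary_mcc([1, 0, 1, 1, 0], [1, 0, 0, 1, 1]) -> 1/6 ≈ 0.1667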
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
_UpperCamelCase: Tuple =logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
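# Usage sketch for the composite tokenizer (assuming the published RAG
# checkpoint is reachable on the Hub):
#
#   from transformers import RagTokenizer
#
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#   # __call__ routes to the question-encoder tokenizer by default
#   inputs = tokenizer("who holds the record in 100m freestyle", return_tensors="pt")
#   print(inputs["input_ids"].shape)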
from __future__ import annotations
import math
class SegmentTree:
    """Max segment tree with lazy propagation for range assignment."""

    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(
        self, idx: int, left_element: int, right_element: int, a: list[int]
    ) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(
        self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int
    ) -> bool:
        """Assign val to every position of [a, b] in O(lg n)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(
        self, idx: int, left_element: int, right_element: int, a: int, b: int
    ) -> int | float:
        """Return max of [a, b] in O(lg n)."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
segt.build(1, 1, size, A)
print(segt.query(1, 1, size, 4, 6))
print(segt.query(1, 1, size, 7, 11))
print(segt.query(1, 1, size, 7, 12))
segt.update(1, 1, size, 1, 3, 111)
print(segt.query(1, 1, size, 1, 15))
segt.update(1, 1, size, 7, 8, 235)
print(segt)
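# A naive O(n) reference makes the lazy-propagation logic easy to sanity-check;
# my own cross-check helper, not part of the original file.
def _naive_cross_check(trials: int = 100) -> None:
    import random

    data = [random.randint(-20, 20) for _ in range(15)]
    n = len(data)
    tree = SegmentTree(n)
    tree.build(1, 1, n, data)
    for _ in range(trials):
        a = random.randint(1, n)
        b = random.randint(a, n)
        if random.random() < 0.5:
            v = random.randint(-20, 20)
            tree.update(1, 1, n, a, b, v)
            for i in range(a - 1, b):
                data[i] = v  # the tree's update is a range *assignment*
        else:
            assert tree.query(1, 1, n, a, b) == max(data[a - 1 : b])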
import numpy as np
SQUARE = [
    ["a", "b", "c", "d", "e"],
    ["f", "g", "h", "i", "k"],
    ["l", "m", "n", "o", "p"],
    ["q", "r", "s", "t", "u"],
    ["v", "w", "x", "y", "z"],
]


class BifidCipher:
    def __init__(self) -> None:
        self.SQUARE = np.array(SQUARE)

    def letter_to_numbers(self, letter: str) -> np.ndarray:
        """Return the 1-indexed (row, column) pair of a letter in the Polybius square."""
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes

    def numbers_to_letter(self, index1: int, index2: int) -> str:
        """Return the letter at the 1-indexed (row, column) position."""
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter

    def encode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")

        # write the coordinates of each letter as two rows, ...
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]

        # ... read them off row by row, and pair them up again
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter

        return encoded_message

    def decode(self, message: str) -> str:
        message = message.lower()
        message = message.replace(" ", "")

        # interleave the ciphertext coordinates, then split back into two rows
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]

        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter

        return decoded_message
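# Round-trip demo; note the classical Polybius constraints baked into encode():
# spaces are dropped and 'j' folds into 'i', so decode() returns the normalized
# message rather than the literal input.
if __name__ == "__main__":
    cipher = BifidCipher()
    ciphertext = cipher.encode("test message")
    print(ciphertext)                 # ciphertext over the 25-letter square
    print(cipher.decode(ciphertext))  # 'testmessage'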
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class UpperCamelCase_ ( unittest.TestCase ):
def _snake_case ( self :Tuple ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__A ) )
def _snake_case ( self :List[str] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__A ) )
def _snake_case ( self :Tuple ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
"""unet/diffusion_pytorch_model.bin""",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__A ) )
def _snake_case ( self :Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
self.assertTrue(is_safetensors_compatible(__A ) )
def _snake_case ( self :Optional[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.bin""",
"""safety_checker/model.safetensors""",
"""vae/diffusion_pytorch_model.bin""",
"""vae/diffusion_pytorch_model.safetensors""",
"""text_encoder/pytorch_model.bin""",
# Removed: 'text_encoder/model.safetensors',
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
self.assertFalse(is_safetensors_compatible(__A ) )
def _snake_case ( self :Tuple ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :Any ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""unet/diffusion_pytorch_model.bin""",
"""unet/diffusion_pytorch_model.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :List[Any] ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
"""unet/diffusion_pytorch_model.fp16.bin""",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertFalse(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :str ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""text_encoder/pytorch_model.fp16.bin""",
"""text_encoder/model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :Optional[int] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""text_encoder/pytorch_model.bin""",
"""text_encoder/model.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
self.assertTrue(is_safetensors_compatible(__A , variant=__A ) )
def _snake_case ( self :Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = [
"""safety_checker/pytorch_model.fp16.bin""",
"""safety_checker/model.fp16.safetensors""",
"""vae/diffusion_pytorch_model.fp16.bin""",
"""vae/diffusion_pytorch_model.fp16.safetensors""",
"""text_encoder/pytorch_model.fp16.bin""",
# 'text_encoder/model.fp16.safetensors',
"""unet/diffusion_pytorch_model.fp16.bin""",
"""unet/diffusion_pytorch_model.fp16.safetensors""",
]
SCREAMING_SNAKE_CASE__ = """fp16"""
        self.assertFalse(is_safetensors_compatible(__A , variant=__A ) )
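# As a mental model for what these tests pin down, a simplified
# re-implementation of the compatibility rule might look like the sketch below
# (my own approximation, NOT the actual diffusers source): every .bin weight
# file needs a .safetensors sibling with the same stem, honoring an optional
# variant suffix such as ".fp16".
def _is_safetensors_compatible_sketch(filenames, variant=None):
    extension = f".{variant}" if variant is not None else ""
    sf_files = {f for f in filenames if f.endswith(".safetensors")}
    for f in filenames:
        if not f.endswith(".bin"):
            continue
        stem = f[: -len(".bin")]
        if extension and stem.endswith(extension):
            stem = stem[: -len(extension)]  # strip the variant suffix before matching
        # accept either the plain or the variant-suffixed safetensors sibling
        if not ({stem + ".safetensors", stem + extension + ".safetensors"} & sf_files):
            return False
    return True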
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
A : Dict = TypeVar('''T''')
def get_parent_position(position: int) -> int:
    """Return the parent index in the array-backed heap."""
    return (position - 1) // 2


def get_child_left_position(position: int) -> int:
    """Return the left-child index in the array-backed heap."""
    return (2 * position) + 1


def get_child_right_position(position: int) -> int:
    """Return the right-child index in the array-backed heap."""
    return (2 * position) + 2
class MinPriorityQueue(Generic[T]):
    """Array-backed min-heap keyed by weight, with a position map for update_key."""

    def __init__(self) -> None:
        self.heap: list[tuple[T, int]] = []
        self.position_map: dict[T, int] = {}
        self.elements: int = 0

    def __len__(self) -> int:
        return self.elements

    def __repr__(self) -> str:
        return str(self.heap)

    def is_empty(self) -> bool:
        return self.elements == 0

    def push(self, elem: T, weight: int) -> None:
        self.heap.append((elem, weight))
        self.position_map[elem] = self.elements
        self.elements += 1
        self._bubble_up(elem)

    def extract_min(self) -> T:
        if self.elements > 1:
            self._swap_nodes(0, self.elements - 1)
        elem, _ = self.heap.pop()
        del self.position_map[elem]
        self.elements -= 1
        if self.elements > 0:
            bubble_down_elem, _ = self.heap[0]
            self._bubble_down(bubble_down_elem)
        return elem

    def update_key(self, elem: T, weight: int) -> None:
        position = self.position_map[elem]
        self.heap[position] = (elem, weight)
        if position > 0:
            parent_position = get_parent_position(position)
            _, parent_weight = self.heap[parent_position]
            if parent_weight > weight:
                self._bubble_up(elem)
            else:
                self._bubble_down(elem)
        else:
            self._bubble_down(elem)

    def _bubble_up(self, elem: T) -> None:
        curr_pos = self.position_map[elem]
        if curr_pos == 0:
            return None
        parent_position = get_parent_position(curr_pos)
        _, weight = self.heap[curr_pos]
        parent_elem, parent_weight = self.heap[parent_position]
        if parent_weight > weight:
            self._swap_nodes(parent_position, curr_pos)
            return self._bubble_up(elem)
        return None

    def _bubble_down(self, elem: T) -> None:
        curr_pos = self.position_map[elem]
        _, weight = self.heap[curr_pos]
        child_left_position = get_child_left_position(curr_pos)
        child_right_position = get_child_right_position(curr_pos)
        if child_left_position < self.elements and child_right_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < child_left_weight and child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        if child_left_position < self.elements:
            _, child_left_weight = self.heap[child_left_position]
            if child_left_weight < weight:
                self._swap_nodes(child_left_position, curr_pos)
                return self._bubble_down(elem)
        else:
            return None
        if child_right_position < self.elements:
            _, child_right_weight = self.heap[child_right_position]
            if child_right_weight < weight:
                self._swap_nodes(child_right_position, curr_pos)
                return self._bubble_down(elem)
        return None

    def _swap_nodes(self, node1_pos: int, node2_pos: int) -> None:
        node1_elem = self.heap[node1_pos][0]
        node2_elem = self.heap[node2_pos][0]
        self.heap[node1_pos], self.heap[node2_pos] = (
            self.heap[node2_pos],
            self.heap[node1_pos],
        )
        self.position_map[node1_elem] = node2_pos
        self.position_map[node2_elem] = node1_pos
class GraphUndirectedWeighted(Generic[T]):
    """Adjacency-map representation of an undirected, weighted graph."""

    def __init__(self) -> None:
        self.connections: dict[T, dict[T, int]] = {}
        self.nodes: int = 0

    def __repr__(self) -> str:
        return str(self.connections)

    def __len__(self) -> int:
        return self.nodes

    def add_node(self, node: T) -> None:
        if node not in self.connections:
            self.connections[node] = {}
            self.nodes += 1

    def add_edge(self, node1: T, node2: T, weight: int) -> None:
        self.add_node(node1)
        self.add_node(node2)
        self.connections[node1][node2] = weight
        self.connections[node2][node1] = weight
def prims_algo(
    graph: GraphUndirectedWeighted[T],
) -> tuple[dict[T, int], dict[T, T | None]]:
    dist: dict[T, int] = {node: maxsize for node in graph.connections}
    parent: dict[T, T | None] = {node: None for node in graph.connections}

    priority_queue: MinPriorityQueue[T] = MinPriorityQueue()
    for node, weight in dist.items():
        priority_queue.push(node, weight)

    if priority_queue.is_empty():
        return dist, parent

    # initialization
    node = priority_queue.extract_min()
    dist[node] = 0
    for neighbour in graph.connections[node]:
        if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
            dist[neighbour] = dist[node] + graph.connections[node][neighbour]
            priority_queue.update_key(neighbour, dist[neighbour])
            parent[neighbour] = node

    # running prim's algorithm
    while not priority_queue.is_empty():
        node = priority_queue.extract_min()
        for neighbour in graph.connections[node]:
            if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
                dist[neighbour] = dist[node] + graph.connections[node][neighbour]
                priority_queue.update_key(neighbour, dist[neighbour])
                parent[neighbour] = node
    return dist, parent
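# Tiny usage demo (assuming the names restored above): the MST of this
# triangle keeps the two cheapest edges.
if __name__ == "__main__":
    g = GraphUndirectedWeighted[str]()
    g.add_edge("a", "b", 3)
    g.add_edge("b", "c", 10)
    g.add_edge("c", "a", 5)
    dist, parent = prims_algo(g)
    print(dist)    # {'a': 0, 'b': 3, 'c': 5}
    print(parent)  # {'a': None, 'b': 'a', 'c': 'a'}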
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
A : Optional[Any] = logging.getLogger(__name__)
class A (SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Optional[int] , __lowerCAmelCase : Any=-1 ) -> Any:
"""simple docstring"""
A__ = label_idx
def a_ ( self : str , __lowerCAmelCase : int , __lowerCAmelCase : Union[Split, str] ) -> List[InputExample]:
"""simple docstring"""
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
A__ = mode.value
A__ = os.path.join(__lowerCAmelCase , f'{mode}.txt' )
A__ = 1
A__ = []
with open(__lowerCAmelCase , encoding="""utf-8""" ) as f:
A__ = []
A__ = []
for line in f:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=__lowerCAmelCase , labels=__lowerCAmelCase ) )
guid_index += 1
A__ = []
A__ = []
else:
A__ = line.split(""" """ )
words.append(splits[0] )
if len(__lowerCAmelCase ) > 1:
labels.append(splits[self.label_idx].replace("""\n""" , """""" ) )
else:
# Examples could have no label for mode = "test"
labels.append("""O""" )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=__lowerCAmelCase , labels=__lowerCAmelCase ) )
return examples
def a_ ( self : List[str] , __lowerCAmelCase : TextIO , __lowerCAmelCase : TextIO , __lowerCAmelCase : List ) -> Dict:
"""simple docstring"""
A__ = 0
for line in test_input_reader:
if line.startswith("""-DOCSTART-""" ) or line == "" or line == "\n":
writer.write(__lowerCAmelCase )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
A__ = line.split()[0] + """ """ + preds_list[example_id].pop(0 ) + """\n"""
writer.write(__lowerCAmelCase )
else:
logger.warning("""Maximum sequence length exceeded: No prediction for '%s'.""" , line.split()[0] )
def a_ ( self : str , __lowerCAmelCase : str ) -> List[str]:
"""simple docstring"""
if path:
with open(__lowerCAmelCase , """r""" ) as f:
A__ = f.read().splitlines()
if "O" not in labels:
A__ = ["""O"""] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class A (SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(label_idx=-2 )
def a_ ( self : Union[str, Any] , __lowerCAmelCase : str ) -> List[str]:
"""simple docstring"""
if path:
with open(__lowerCAmelCase , """r""" ) as f:
A__ = f.read().splitlines()
if "O" not in labels:
A__ = ["""O"""] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class A (SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def a_ ( self : List[Any] , __lowerCAmelCase : Optional[int] , __lowerCAmelCase : Union[Split, str] ) -> List[InputExample]:
"""simple docstring"""
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
A__ = mode.value
A__ = os.path.join(__lowerCAmelCase , f'{mode}.txt' )
A__ = 1
A__ = []
with open(__lowerCAmelCase , encoding="""utf-8""" ) as f:
for sentence in parse_incr(__lowerCAmelCase ):
A__ = []
A__ = []
for token in sentence:
words.append(token["""form"""] )
labels.append(token["""upos"""] )
assert len(__lowerCAmelCase ) == len(__lowerCAmelCase )
if words:
examples.append(InputExample(guid=f'{mode}-{guid_index}' , words=__lowerCAmelCase , labels=__lowerCAmelCase ) )
guid_index += 1
return examples
def a_ ( self : int , __lowerCAmelCase : TextIO , __lowerCAmelCase : TextIO , __lowerCAmelCase : List ) -> List[str]:
"""simple docstring"""
A__ = 0
for sentence in parse_incr(__lowerCAmelCase ):
A__ = preds_list[example_id]
A__ = """"""
for token in sentence:
out += f'{token["form"]} ({token["upos"]}|{s_p.pop(0 )}) '
out += "\n"
writer.write(__lowerCAmelCase )
example_id += 1
def a_ ( self : Any , __lowerCAmelCase : str ) -> List[str]:
"""simple docstring"""
if path:
with open(__lowerCAmelCase , """r""" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
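# For context, the readers above consume whitespace-separated token/label
# columns with blank lines between sentences; a tiny standalone sketch of that
# parsing loop (my own demo, mirroring the `read_examples_from_file` logic):
def _read_conll_demo(lines):
    sentences, words, labels = [], [], []
    for line in lines:
        if line.startswith("-DOCSTART-") or line.strip() == "":
            if words:
                sentences.append((words, labels))
                words, labels = [], []
        else:
            splits = line.split(" ")
            words.append(splits[0])
            labels.append(splits[-1].strip() if len(splits) > 1 else "O")
    if words:
        sentences.append((words, labels))
    return sentences

# _read_conll_demo(["EU B-ORG\n", "rejects O\n", "\n", "Peter B-PER\n", "Blackburn I-PER\n"])
# -> [(['EU', 'rejects'], ['B-ORG', 'O']), (['Peter', 'Blackburn'], ['B-PER', 'I-PER'])]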
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
'''MRA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MraForMaskedLM''',
'''MraForMultipleChoice''',
'''MraForQuestionAnswering''',
'''MraForSequenceClassification''',
'''MraForTokenClassification''',
'''MraLayer''',
'''MraModel''',
'''MraPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def SCREAMING_SNAKE_CASE ( lowercase_ : List[Any] ):
lowercase = model.config
lowercase = DonutSwinConfig(
image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , )
lowercase = MBartConfig(
is_decoder=lowercase_ , is_encoder_decoder=lowercase_ , add_cross_attention=lowercase_ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len(
model.decoder.tokenizer ) , scale_embedding=lowercase_ , add_final_layer_norm=lowercase_ , )
return encoder_config, decoder_config
def SCREAMING_SNAKE_CASE ( lowercase_ : Dict ):
if "encoder.model" in name:
lowercase = name.replace("""encoder.model""" , """encoder""" )
if "decoder.model" in name:
lowercase = name.replace("""decoder.model""" , """decoder""" )
if "patch_embed.proj" in name:
lowercase = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
lowercase = name.replace("""patch_embed.norm""" , """embeddings.norm""" )
if name.startswith("""encoder""" ):
if "layers" in name:
lowercase = """encoder.""" + name
if "attn.proj" in name:
lowercase = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "mask" not in name:
lowercase = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
lowercase = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
lowercase = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
lowercase = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
lowercase = name.replace("""mlp.fc2""" , """output.dense""" )
if name == "encoder.norm.weight":
lowercase = """encoder.layernorm.weight"""
if name == "encoder.norm.bias":
lowercase = """encoder.layernorm.bias"""
return name
def SCREAMING_SNAKE_CASE ( lowercase_ : Tuple , lowercase_ : Dict ):
for key in orig_state_dict.copy().keys():
lowercase = orig_state_dict.pop(lowercase_ )
if "qkv" in key:
lowercase = key.split(""".""" )
lowercase = int(key_split[3] )
lowercase = int(key_split[5] )
lowercase = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowercase = val[:dim, :]
lowercase = val[dim : dim * 2, :]
lowercase = val[-dim:, :]
else:
lowercase = val[:dim]
lowercase = val[dim : dim * 2]
lowercase = val[-dim:]
elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
# HuggingFace implementation doesn't use attn_mask buffer
# and model doesn't use final LayerNorms for the encoder
pass
else:
lowercase = val
return orig_state_dict
def SCREAMING_SNAKE_CASE ( lowercase_ : Optional[Any] , lowercase_ : str=None , lowercase_ : Optional[Any]=False ):
# load original model
lowercase = DonutModel.from_pretrained(lowercase_ ).eval()
# load HuggingFace model
lowercase , lowercase = get_configs(lowercase_ )
lowercase = DonutSwinModel(lowercase_ )
lowercase = MBartForCausalLM(lowercase_ )
lowercase = VisionEncoderDecoderModel(encoder=lowercase_ , decoder=lowercase_ )
model.eval()
lowercase = original_model.state_dict()
lowercase = convert_state_dict(lowercase_ , lowercase_ )
model.load_state_dict(lowercase_ )
# verify results on scanned document
lowercase = load_dataset("""hf-internal-testing/example-documents""" )
lowercase = dataset["""test"""][0]["""image"""].convert("""RGB""" )
lowercase = XLMRobertaTokenizerFast.from_pretrained(lowercase_ , from_slow=lowercase_ )
lowercase = DonutImageProcessor(
do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] )
lowercase = DonutProcessor(lowercase_ , lowercase_ )
lowercase = processor(lowercase_ , return_tensors="""pt""" ).pixel_values
if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
lowercase = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
lowercase = """When is the coffee break?"""
lowercase = task_prompt.replace("""{user_input}""" , lowercase_ )
elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
lowercase = """<s_rvlcdip>"""
elif model_name in [
"naver-clova-ix/donut-base-finetuned-cord-v1",
"naver-clova-ix/donut-base-finetuned-cord-v1-2560",
]:
lowercase = """<s_cord>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
lowercase = """s_cord-v2>"""
elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
lowercase = """<s_zhtrainticket>"""
elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
# use a random prompt
lowercase = """hello world"""
else:
raise ValueError("""Model name not supported""" )
lowercase = original_model.decoder.tokenizer(lowercase_ , add_special_tokens=lowercase_ , return_tensors="""pt""" )[
"""input_ids"""
]
lowercase = original_model.encoder.model.patch_embed(lowercase_ )
lowercase , lowercase = model.encoder.embeddings(lowercase_ )
assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 )
# verify encoder hidden states
lowercase = original_model.encoder(lowercase_ )
lowercase = model.encoder(lowercase_ ).last_hidden_state
assert torch.allclose(lowercase_ , lowercase_ , atol=1E-2 )
# verify decoder hidden states
lowercase = original_model(lowercase_ , lowercase_ , lowercase_ ).logits
lowercase = model(lowercase_ , decoder_input_ids=lowercase_ ).logits
assert torch.allclose(lowercase_ , lowercase_ , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(F"""Saving model and processor to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowercase_ )
processor.save_pretrained(lowercase_ )
if push_to_hub:
model.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
processor.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" )
if __name__ == "__main__":
lowercase_ : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''naver-clova-ix/donut-base-finetuned-docvqa''',
required=False,
type=str,
help='''Name of the original model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
required=False,
type=str,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether or not to push the converted model and processor to the 🤗 hub.''',
)
lowercase_ : Dict = parser.parse_args()
convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
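# After conversion, inference follows the standard VisionEncoderDecoderModel
# flow; a sketch for the DocVQA variant (assuming the converted artifacts are
# available under the usual checkpoint id):
#
#   from datasets import load_dataset
#   from transformers import DonutProcessor, VisionEncoderDecoderModel
#
#   processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa")
#   model = VisionEncoderDecoderModel.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa")
#
#   image = load_dataset("hf-internal-testing/example-documents")["test"][0]["image"].convert("RGB")
#   pixel_values = processor(image, return_tensors="pt").pixel_values
#   prompt = "<s_docvqa><s_question>When is the coffee break?</s_question><s_answer>"
#   decoder_input_ids = processor.tokenizer(prompt, add_special_tokens=False, return_tensors="pt").input_ids
#
#   outputs = model.generate(pixel_values, decoder_input_ids=decoder_input_ids, max_length=128)
#   print(processor.batch_decode(outputs)[0])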
import requests

_NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f"{i}.) {article['title']}")


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
from typing import Any


def mode(input_list: list) -> list[Any]:
    """Return every most-frequent value of the list, sorted; [] for an empty list."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # the maximum count in the input list
    # gather the values whose count equals the maximum
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})


if __name__ == "__main__":
    import doctest

    doctest.testmod()
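# A minimal usage demo of mode(); ties return every most-frequent value, sorted.
if __name__ == "__main__":
    print(mode([2, 2, 3]))        # [2]
    print(mode([1, 2, 2, 1, 3]))  # [1, 2]
    print(mode([]))               # []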
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
SpeechTaHifiGan,
SpeechTaHifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )
def UpperCAmelCase ( self : Any ) -> List[str]:
torch.manual_seed(0 )
__UpperCAmelCase : List[Any] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=(32, 64) , class_embed_type="""simple_projection""" , projection_class_embeddings_input_dim=32 , class_embeddings_concat=__lowercase , )
__UpperCAmelCase : Optional[int] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__lowercase , set_alpha_to_one=__lowercase , )
torch.manual_seed(0 )
__UpperCAmelCase : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
__UpperCAmelCase : Optional[Any] = ClapTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , projection_dim=32 , )
__UpperCAmelCase : Optional[int] = ClapTextModelWithProjection(__lowercase )
__UpperCAmelCase : str = RobertaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-roberta""" , model_max_length=77 )
__UpperCAmelCase : Dict = SpeechTaHifiGanConfig(
model_in_dim=8 , sampling_rate=16000 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=__lowercase , )
__UpperCAmelCase : int = SpeechTaHifiGan(__lowercase )
__UpperCAmelCase : Tuple = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""vocoder""": vocoder,
}
return components
def UpperCAmelCase ( self : Optional[int] , __lowercase : Any , __lowercase : str=0 ) -> List[str]:
if str(__lowercase ).startswith("""mps""" ):
__UpperCAmelCase : Dict = torch.manual_seed(__lowercase )
else:
__UpperCAmelCase : Tuple = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__UpperCAmelCase : Tuple = {
"""prompt""": """A hammer hitting a wooden surface""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
}
return inputs
def UpperCAmelCase ( self : Tuple ) -> Union[str, Any]:
__UpperCAmelCase : Tuple = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : Dict = self.get_dummy_components()
__UpperCAmelCase : List[Any] = AudioLDMPipeline(**__lowercase )
__UpperCAmelCase : Tuple = audioldm_pipe.to(__lowercase )
audioldm_pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : List[Any] = self.get_dummy_inputs(__lowercase )
__UpperCAmelCase : Union[str, Any] = audioldm_pipe(**__lowercase )
__UpperCAmelCase : Union[str, Any] = output.audios[0]
assert audio.ndim == 1
assert len(__lowercase ) == 256
__UpperCAmelCase : str = audio[:10]
__UpperCAmelCase : List[Any] = np.array(
[-0.0_050, 0.0_050, -0.0_060, 0.0_033, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_033] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def UpperCAmelCase ( self : Tuple ) -> Optional[int]:
__UpperCAmelCase : List[str] = self.get_dummy_components()
__UpperCAmelCase : Any = AudioLDMPipeline(**__lowercase )
__UpperCAmelCase : Tuple = audioldm_pipe.to(__lowercase )
__UpperCAmelCase : str = audioldm_pipe.to(__lowercase )
audioldm_pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : Tuple = self.get_dummy_inputs(__lowercase )
__UpperCAmelCase : Dict = 3 * [inputs["""prompt"""]]
# forward
__UpperCAmelCase : Union[str, Any] = audioldm_pipe(**__lowercase )
__UpperCAmelCase : int = output.audios[0]
__UpperCAmelCase : List[str] = self.get_dummy_inputs(__lowercase )
__UpperCAmelCase : Any = 3 * [inputs.pop("""prompt""" )]
__UpperCAmelCase : Tuple = audioldm_pipe.tokenizer(
__lowercase , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__lowercase , return_tensors="""pt""" , )
__UpperCAmelCase : Optional[Any] = text_inputs["""input_ids"""].to(__lowercase )
__UpperCAmelCase : int = audioldm_pipe.text_encoder(
__lowercase , )
__UpperCAmelCase : Dict = prompt_embeds.text_embeds
# additional L_2 normalization over each hidden-state
__UpperCAmelCase : Tuple = F.normalize(__lowercase , dim=-1 )
__UpperCAmelCase : Tuple = prompt_embeds
# forward
__UpperCAmelCase : Dict = audioldm_pipe(**__lowercase )
__UpperCAmelCase : str = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def UpperCAmelCase ( self : Union[str, Any] ) -> str:
__UpperCAmelCase : Tuple = self.get_dummy_components()
__UpperCAmelCase : Any = AudioLDMPipeline(**__lowercase )
__UpperCAmelCase : Dict = audioldm_pipe.to(__lowercase )
__UpperCAmelCase : int = audioldm_pipe.to(__lowercase )
audioldm_pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs(__lowercase )
__UpperCAmelCase : Optional[Any] = 3 * ["""this is a negative prompt"""]
__UpperCAmelCase : Optional[Any] = negative_prompt
__UpperCAmelCase : Tuple = 3 * [inputs["""prompt"""]]
# forward
__UpperCAmelCase : int = audioldm_pipe(**__lowercase )
__UpperCAmelCase : Any = output.audios[0]
__UpperCAmelCase : List[Any] = self.get_dummy_inputs(__lowercase )
__UpperCAmelCase : Tuple = 3 * [inputs.pop("""prompt""" )]
__UpperCAmelCase : List[Any] = []
for p in [prompt, negative_prompt]:
__UpperCAmelCase : List[str] = audioldm_pipe.tokenizer(
__lowercase , padding="""max_length""" , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=__lowercase , return_tensors="""pt""" , )
__UpperCAmelCase : Union[str, Any] = text_inputs["""input_ids"""].to(__lowercase )
__UpperCAmelCase : Optional[Any] = audioldm_pipe.text_encoder(
__lowercase , )
__UpperCAmelCase : Tuple = text_embeds.text_embeds
# additional L_2 normalization over each hidden-state
__UpperCAmelCase : Any = F.normalize(__lowercase , dim=-1 )
embeds.append(__lowercase )
__UpperCAmelCase , __UpperCAmelCase : Optional[int] = embeds
# forward
__UpperCAmelCase : str = audioldm_pipe(**__lowercase )
__UpperCAmelCase : str = output.audios[0]
assert np.abs(audio_a - audio_a ).max() < 1e-2
def UpperCAmelCase ( self : Dict ) -> Tuple:
__UpperCAmelCase : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : List[Any] = self.get_dummy_components()
__UpperCAmelCase : Union[str, Any] = PNDMScheduler(skip_prk_steps=__lowercase )
__UpperCAmelCase : Tuple = AudioLDMPipeline(**__lowercase )
__UpperCAmelCase : str = audioldm_pipe.to(__lowercase )
audioldm_pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs(__lowercase )
__UpperCAmelCase : Optional[Any] = """egg cracking"""
__UpperCAmelCase : Optional[Any] = audioldm_pipe(**__lowercase , negative_prompt=__lowercase )
__UpperCAmelCase : Tuple = output.audios[0]
assert audio.ndim == 1
assert len(__lowercase ) == 256
__UpperCAmelCase : Union[str, Any] = audio[:10]
__UpperCAmelCase : int = np.array(
[-0.0_051, 0.0_050, -0.0_060, 0.0_034, -0.0_026, 0.0_033, -0.0_027, 0.0_033, -0.0_028, 0.0_032] )
assert np.abs(audio_slice - expected_slice ).max() < 1e-2
def UpperCAmelCase ( self : str ) -> Any:
__UpperCAmelCase : str = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : List[Any] = self.get_dummy_components()
__UpperCAmelCase : str = PNDMScheduler(skip_prk_steps=__lowercase )
__UpperCAmelCase : Tuple = AudioLDMPipeline(**__lowercase )
__UpperCAmelCase : Tuple = audioldm_pipe.to(__lowercase )
audioldm_pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : str = """A hammer hitting a wooden surface"""
# test num_waveforms_per_prompt=1 (default)
__UpperCAmelCase : Union[str, Any] = audioldm_pipe(__lowercase , num_inference_steps=2 ).audios
assert audios.shape == (1, 256)
# test num_waveforms_per_prompt=1 (default) for batch of prompts
__UpperCAmelCase : Optional[Any] = 2
__UpperCAmelCase : int = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios
assert audios.shape == (batch_size, 256)
# test num_waveforms_per_prompt for single prompt
__UpperCAmelCase : int = 2
__UpperCAmelCase : str = audioldm_pipe(__lowercase , num_inference_steps=2 , num_waveforms_per_prompt=__lowercase ).audios
assert audios.shape == (num_waveforms_per_prompt, 256)
# test num_waveforms_per_prompt for batch of prompts
__UpperCAmelCase : Any = 2
__UpperCAmelCase : Tuple = audioldm_pipe(
[prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=__lowercase ).audios
assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)
def UpperCAmelCase ( self : List[str] ) -> str:
__UpperCAmelCase : Optional[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator
__UpperCAmelCase : Tuple = self.get_dummy_components()
__UpperCAmelCase : int = AudioLDMPipeline(**__lowercase )
__UpperCAmelCase : Dict = audioldm_pipe.to(__lowercase )
audioldm_pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : List[str] = audioldm_pipe.vocoder.config.sampling_rate
__UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs(__lowercase )
__UpperCAmelCase : Optional[Any] = audioldm_pipe(audio_length_in_s=0.016 , **__lowercase )
__UpperCAmelCase : Tuple = output.audios[0]
assert audio.ndim == 1
assert len(__lowercase ) / vocoder_sampling_rate == 0.016
__UpperCAmelCase : Optional[Any] = audioldm_pipe(audio_length_in_s=0.032 , **__lowercase )
__UpperCAmelCase : Dict = output.audios[0]
assert audio.ndim == 1
assert len(__lowercase ) / vocoder_sampling_rate == 0.032
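# Note: with the dummy vocoder's 16 kHz sampling rate configured above, 0.016 s
# of audio corresponds to 256 samples and 0.032 s to 512 samples.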
def UpperCAmelCase ( self : Any ) -> List[Any]:
__UpperCAmelCase : List[Any] = self.get_dummy_components()
__UpperCAmelCase : Any = AudioLDMPipeline(**__lowercase )
__UpperCAmelCase : Dict = audioldm_pipe.to(__lowercase )
audioldm_pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : List[str] = ["""hey"""]
__UpperCAmelCase : Dict = audioldm_pipe(__lowercase , num_inference_steps=1 )
__UpperCAmelCase : Tuple = output.audios.shape
assert audio_shape == (1, 256)
__UpperCAmelCase : Optional[Any] = audioldm_pipe.vocoder.config
config.model_in_dim *= 2
__UpperCAmelCase : List[Any] = SpeechTaHifiGan(__lowercase ).to(__lowercase )
__UpperCAmelCase : Dict = audioldm_pipe(__lowercase , num_inference_steps=1 )
__UpperCAmelCase : int = output.audios.shape
# waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
assert audio_shape == (1, 256)
def UpperCAmelCase ( self : Dict ) -> Optional[int]:
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=__lowercase )
def UpperCAmelCase ( self : str ) -> Any:
self._test_inference_batch_single_identical(test_mean_pixel_difference=__lowercase )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__lowercase )
@slow
class a ( unittest.TestCase ):
"""simple docstring"""
def UpperCAmelCase ( self : Dict ) -> Tuple:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCAmelCase ( self : Optional[Any] , __lowercase : Optional[int] , __lowercase : int="cpu" , __lowercase : List[Any]=torch.floataa , __lowercase : Tuple=0 ) -> Dict:
__UpperCAmelCase : int = torch.Generator(device=__lowercase ).manual_seed(__lowercase )
__UpperCAmelCase : Dict = np.random.RandomState(__lowercase ).standard_normal((1, 8, 128, 16) )
__UpperCAmelCase : Optional[Any] = torch.from_numpy(__lowercase ).to(device=__lowercase , dtype=__lowercase )
__UpperCAmelCase : int = {
"""prompt""": """A hammer hitting a wooden surface""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 2.5,
}
return inputs
def UpperCAmelCase ( self : int ) -> List[str]:
__UpperCAmelCase : Any = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
__UpperCAmelCase : Union[str, Any] = audioldm_pipe.to(__lowercase )
audioldm_pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : Tuple = self.get_inputs(__lowercase )
__UpperCAmelCase : str = 25
__UpperCAmelCase : Optional[int] = audioldm_pipe(**__lowercase ).audios[0]
assert audio.ndim == 1
assert len(__lowercase ) == 81920
__UpperCAmelCase : Dict = audio[77230:77240]
__UpperCAmelCase : Optional[Any] = np.array(
[-0.4_884, -0.4_607, 0.0_023, 0.5_007, 0.5_896, 0.5_151, 0.3_813, -0.0_208, -0.3_687, -0.4_315] )
__UpperCAmelCase : Optional[Any] = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 1e-2
def UpperCAmelCase ( self : str ) -> Tuple:
__UpperCAmelCase : Optional[Any] = AudioLDMPipeline.from_pretrained("""cvssp/audioldm""" )
__UpperCAmelCase : Any = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config )
__UpperCAmelCase : int = audioldm_pipe.to(__lowercase )
audioldm_pipe.set_progress_bar_config(disable=__lowercase )
__UpperCAmelCase : List[Any] = self.get_inputs(__lowercase )
__UpperCAmelCase : Optional[int] = audioldm_pipe(**__lowercase ).audios[0]
assert audio.ndim == 1
assert len(__lowercase ) == 81920
__UpperCAmelCase : int = audio[27780:27790]
__UpperCAmelCase : Optional[Any] = np.array([-0.2_131, -0.0_873, -0.0_124, -0.0_189, 0.0_569, 0.1_373, 0.1_883, 0.2_886, 0.3_297, 0.2_212] )
__UpperCAmelCase : Dict = np.abs(expected_slice - audio_slice ).max()
assert max_diff < 3e-2
| 63 | import sys
from collections import defaultdict
class lowerCAmelCase_ :
def __init__( self : Optional[int] ):
_UpperCamelCase = []
def UpperCamelCase_ ( self : Any , _A : str ):
return self.node_position[vertex]
def UpperCamelCase_ ( self : Optional[Any] , _A : List[str] , _A : Union[str, Any] ):
_UpperCamelCase = pos
def UpperCamelCase_ ( self : Any , _A : List[str] , _A : int , _A : Optional[Any] , _A : Union[str, Any] ):
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
_UpperCamelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
_UpperCamelCase = 2 * start + 1
else:
_UpperCamelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
_UpperCamelCase , _UpperCamelCase = heap[smallest_child], positions[smallest_child]
_UpperCamelCase , _UpperCamelCase = heap[start], positions[start]
_UpperCamelCase , _UpperCamelCase = temp, tempa
_UpperCamelCase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , _A )
self.top_to_bottom(_A , _A , _A , _A )
def UpperCamelCase_ ( self : List[str] , _A : Tuple , _A : Optional[Any] , _A : int , _A : Optional[int] ):
_UpperCamelCase = position[index]
while index != 0:
_UpperCamelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
_UpperCamelCase = heap[parent]
_UpperCamelCase = position[parent]
self.set_position(position[parent] , _A )
else:
_UpperCamelCase = val
_UpperCamelCase = temp
self.set_position(_A , _A )
break
_UpperCamelCase = parent
else:
_UpperCamelCase = val
_UpperCamelCase = temp
self.set_position(_A , 0 )
def UpperCamelCase_ ( self : int , _A : Tuple , _A : int ):
_UpperCamelCase = len(_A ) // 2 - 1
for i in range(_A , -1 , -1 ):
self.top_to_bottom(_A , _A , len(_A ) , _A )
def UpperCamelCase_ ( self : Any , _A : int , _A : List[str] ):
_UpperCamelCase = positions[0]
_UpperCamelCase = sys.maxsize
self.top_to_bottom(_A , 0 , len(_A ) , _A )
return temp
def _snake_case ( __snake_case ):
_UpperCamelCase = Heap()
_UpperCamelCase = [0] * len(__snake_case )
_UpperCamelCase = [-1] * len(__snake_case ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
_UpperCamelCase = [] # Heap of Distance of vertices from their neighboring vertex
_UpperCamelCase = []
for vertex in range(len(__snake_case ) ):
distance_tv.append(sys.maxsize )
positions.append(__snake_case )
heap.node_position.append(__snake_case )
_UpperCamelCase = []
_UpperCamelCase = 1
_UpperCamelCase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
_UpperCamelCase = 0
_UpperCamelCase = distance
heap.heapify(__snake_case , __snake_case )
for _ in range(1 , len(__snake_case ) ):
_UpperCamelCase = heap.delete_minimum(__snake_case , __snake_case )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
_UpperCamelCase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(__snake_case )]
):
_UpperCamelCase = distance
heap.bottom_to_top(
__snake_case , heap.get_position(__snake_case ) , __snake_case , __snake_case )
_UpperCamelCase = vertex
return tree_edges
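# Minimal usage sketch for the Prim's-algorithm function above (the graph below
# is a made-up example, not part of the original script):
#
#     example_graph = defaultdict(list)
#     for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 4), (2, 3, 3)]:
#         example_graph[u].append([v, w])
#         example_graph[v].append([u, w])
#     print(_snake_case(example_graph))  # expected MST edges: [(0, 1), (1, 2), (2, 3)]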
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
_lowerCAmelCase = int(input("Enter number of edges: ").strip())
_lowerCAmelCase = defaultdict(list)
for _ in range(edges_number):
_lowerCAmelCase = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 10 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def __snake_case ( lowercase : Optional[int]=None ):
if subparsers is not None:
snake_case_ = subparsers.add_parser("test" )
else:
snake_case_ = argparse.ArgumentParser("Accelerate test command" )
parser.add_argument(
"--config_file" , default=lowercase , help=(
"The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
"location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
"such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
"with 'huggingface'."
) , )
if subparsers is not None:
parser.set_defaults(func=lowercase )
return parser
def __snake_case ( lowercase : List[Any] ):
snake_case_ = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] )
if args.config_file is None:
snake_case_ = script_name
else:
snake_case_ = f'''--config_file={args.config_file} {script_name}'''
snake_case_ = ["accelerate-launch"] + test_args.split()
snake_case_ = execute_subprocess_async(lowercase , env=os.environ.copy() )
if result.returncode == 0:
print("Test is a success! You are ready for your distributed training!" )
def __snake_case ( ):
snake_case_ = test_command_parser()
snake_case_ = parser.parse_args()
test_command(lowercase )
if __name__ == "__main__":
main()
| 420 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase__ = {
'''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''],
'''tokenization_tapas''': ['''TapasTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TapasForMaskedLM''',
'''TapasForQuestionAnswering''',
'''TapasForSequenceClassification''',
'''TapasModel''',
'''TapasPreTrainedModel''',
'''load_tf_weights_in_tapas''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFTapasForMaskedLM''',
'''TFTapasForQuestionAnswering''',
'''TFTapasForSequenceClassification''',
'''TFTapasModel''',
'''TFTapasPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
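# The _LazyModule registration above keeps importing this package cheap: a
# submodule is only loaded when one of its symbols is first accessed. A
# simplified sketch of the mechanism (an illustration, not the actual
# transformers implementation):
#
#     import importlib
#
#     class _Lazy:
#         def __init__(self, name, structure):
#             self._name, self._structure = name, structure
#         def __getattr__(self, attr):
#             for submodule, symbols in self._structure.items():
#                 if attr in symbols:
#                     module = importlib.import_module("." + submodule, self._name)
#                     return getattr(module, attr)
#             raise AttributeError(attr)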
| 420 | 1 |
def __a ( ) -> Any:
"""simple docstring"""
for n in range(1 , 1000000 ):
yield n * (n + 1) // 2
def __a ( __UpperCAmelCase : List[str] ) -> List[Any]:
"""simple docstring"""
lowerCamelCase_ : Optional[Any] = 1
lowerCamelCase_ : Dict = 2
while i * i <= n:
lowerCamelCase_ : List[Any] = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
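# Worked example for the divisor count above: for 28 = 2**2 * 7 the loop finds
# multiplicity 2 at i = 2, then stops at i = 3 with n = 7 > 1, so
# count_divisors(28) == (2 + 1) * (1 + 1) == 6, matching {1, 2, 4, 7, 14, 28}.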
def __a ( ) -> Dict:
"""simple docstring"""
return next(i for i in triangle_number_generator() if count_divisors(__UpperCAmelCase ) > 500 )
if __name__ == "__main__":
print(solution())
| 488 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def __a ( __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : List[Any] , __UpperCAmelCase : int , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str ) -> Optional[Any]:
"""simple docstring"""
with open(__UpperCAmelCase ) as metadata_file:
lowerCamelCase_ : List[str] = json.load(__UpperCAmelCase )
lowerCamelCase_ : Optional[int] = LukeConfig(use_entity_aware_attention=__UpperCAmelCase , **metadata["model_config"] )
# Load in the weights from the checkpoint_path
lowerCamelCase_ : Optional[Any] = torch.load(__UpperCAmelCase , map_location="cpu" )["module"]
# Load the entity vocab file
lowerCamelCase_ : Optional[Any] = load_original_entity_vocab(__UpperCAmelCase )
# add an entry for [MASK2]
lowerCamelCase_ : Union[str, Any] = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
lowerCamelCase_ : Optional[Any] = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"] )
# Add special tokens to the token vocabulary for downstream tasks
lowerCamelCase_ : Any = AddedToken("<ent>" , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase )
lowerCamelCase_ : Union[str, Any] = AddedToken("<ent2>" , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase )
tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f"Saving tokenizer to {pytorch_dump_folder_path}" )
tokenizer.save_pretrained(__UpperCAmelCase )
with open(os.path.join(__UpperCAmelCase , "tokenizer_config.json" ) , "r" ) as f:
lowerCamelCase_ : Any = json.load(__UpperCAmelCase )
lowerCamelCase_ : Optional[Any] = "MLukeTokenizer"
with open(os.path.join(__UpperCAmelCase , "tokenizer_config.json" ) , "w" ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
with open(os.path.join(__UpperCAmelCase , MLukeTokenizer.vocab_files_names["entity_vocab_file"] ) , "w" ) as f:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
lowerCamelCase_ : Tuple = MLukeTokenizer.from_pretrained(__UpperCAmelCase )
# Initialize the embeddings of the special tokens
lowerCamelCase_ : Optional[int] = tokenizer.convert_tokens_to_ids(["@"] )[0]
lowerCamelCase_ : Dict = tokenizer.convert_tokens_to_ids(["#"] )[0]
lowerCamelCase_ : Optional[int] = state_dict["embeddings.word_embeddings.weight"]
lowerCamelCase_ : List[Any] = word_emb[ent_init_index].unsqueeze(0 )
lowerCamelCase_ : str = word_emb[enta_init_index].unsqueeze(0 )
lowerCamelCase_ : List[str] = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
lowerCamelCase_ : List[Any] = state_dict[bias_name]
lowerCamelCase_ : Any = decoder_bias[ent_init_index].unsqueeze(0 )
lowerCamelCase_ : List[Any] = decoder_bias[enta_init_index].unsqueeze(0 )
lowerCamelCase_ : Optional[int] = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
lowerCamelCase_ : str = f"encoder.layer.{layer_index}.attention.self."
lowerCamelCase_ : Optional[Any] = state_dict[prefix + matrix_name]
lowerCamelCase_ : List[str] = state_dict[prefix + matrix_name]
lowerCamelCase_ : str = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
lowerCamelCase_ : Optional[int] = state_dict["entity_embeddings.entity_embeddings.weight"]
lowerCamelCase_ : List[str] = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0 )
lowerCamelCase_ : Union[str, Any] = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
lowerCamelCase_ : Optional[int] = state_dict["entity_predictions.bias"]
lowerCamelCase_ : Dict = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0 )
lowerCamelCase_ : Dict = torch.cat([entity_prediction_bias, entity_mask_bias] )
lowerCamelCase_ : str = LukeForMaskedLM(config=__UpperCAmelCase ).eval()
state_dict.pop("entity_predictions.decoder.weight" )
state_dict.pop("lm_head.decoder.weight" )
state_dict.pop("lm_head.decoder.bias" )
lowerCamelCase_ : str = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith("lm_head" ) or key.startswith("entity_predictions" )):
lowerCamelCase_ : str = state_dict[key]
else:
lowerCamelCase_ : Optional[int] = state_dict[key]
lowerCamelCase_ , lowerCamelCase_ : Union[str, Any] = model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase )
if set(__UpperCAmelCase ) != {"luke.embeddings.position_ids"}:
raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}" )
if set(__UpperCAmelCase ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f"Unexpected missing_keys: {missing_keys}" )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
lowerCamelCase_ : str = MLukeTokenizer.from_pretrained(__UpperCAmelCase , task="entity_classification" )
lowerCamelCase_ : Union[str, Any] = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
lowerCamelCase_ : Any = (0, 9)
lowerCamelCase_ : List[Any] = tokenizer(__UpperCAmelCase , entity_spans=[span] , return_tensors="pt" )
lowerCamelCase_ : Any = model(**__UpperCAmelCase )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
lowerCamelCase_ : Any = torch.Size((1, 33, 768) )
lowerCamelCase_ : Optional[int] = torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}" )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , __UpperCAmelCase , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
lowerCamelCase_ : Union[str, Any] = torch.Size((1, 1, 768) )
lowerCamelCase_ : str = torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
f" {expected_shape}" )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , __UpperCAmelCase , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
lowerCamelCase_ : Any = MLukeTokenizer.from_pretrained(__UpperCAmelCase )
lowerCamelCase_ : Optional[Any] = "Tokyo is the capital of <mask>."
lowerCamelCase_ : Optional[int] = (24, 30)
lowerCamelCase_ : Dict = tokenizer(__UpperCAmelCase , entity_spans=[span] , return_tensors="pt" )
lowerCamelCase_ : List[Any] = model(**__UpperCAmelCase )
lowerCamelCase_ : Dict = encoding["input_ids"][0].tolist()
lowerCamelCase_ : Tuple = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>" ) )
lowerCamelCase_ : str = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(__UpperCAmelCase )
lowerCamelCase_ : Optional[Any] = outputs.entity_logits[0][0].argmax().item()
lowerCamelCase_ : Tuple = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith("en:" )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print("Saving PyTorch model to {}".format(__UpperCAmelCase ) )
model.save_pretrained(__UpperCAmelCase )
def __a ( __UpperCAmelCase : List[str] ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase_ : int = ["[MASK]", "[PAD]", "[UNK]"]
lowerCamelCase_ : int = [json.loads(__UpperCAmelCase ) for line in open(__UpperCAmelCase )]
lowerCamelCase_ : Optional[Any] = {}
for entry in data:
lowerCamelCase_ : str = entry["id"]
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
lowerCamelCase_ : Union[str, Any] = entity_id
break
lowerCamelCase_ : Any = f"{language}:{entity_name}"
lowerCamelCase_ : int = entity_id
return new_mapping
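# Each line of the original entity vocab file is a JSON object; a hypothetical
# entry with the structure the parser above expects:
#
#     {"id": 42, "entities": [["Japan", "en"], ["日本", "ja"]]}
#
# This is mapped to {"en:Japan": 42, "ja:日本": 42}, while special tokens such as
# "[MASK]" keep their bare names.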
if __name__ == "__main__":
snake_case_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
parser.add_argument(
"--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
)
parser.add_argument(
"--entity_vocab_path",
default=None,
type=str,
help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
)
parser.add_argument(
"--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
)
snake_case_ : Tuple = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 488 | 1 |
import colorsys
from PIL import Image # type: ignore
def lowerCAmelCase ( _lowerCAmelCase : float , _lowerCAmelCase : float , _lowerCAmelCase : int ):
"""simple docstring"""
UpperCAmelCase__ = x
UpperCAmelCase__ = y
for step in range(_lowerCAmelCase ): # noqa: B007
UpperCAmelCase__ = a * a - b * b + x
UpperCAmelCase__ = 2 * a * b + y
UpperCAmelCase__ = a_new
# divergence is guaranteed once the absolute value of the complex number
# exceeds 2, i.e. once a * a + b * b > 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
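# Quick sanity check: the origin never escapes, so get_distance(0, 0, 50) == 1.0,
# while a point such as (1, 0) diverges within a couple of iterations and
# returns a value far below 1.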
def lowerCAmelCase ( _lowerCAmelCase : float ):
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return (255, 255, 255)
def lowerCAmelCase ( _lowerCAmelCase : float ):
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 255 ) for i in colorsys.hsv_to_rgb(_lowerCAmelCase , 1 , 1 ) )
def lowerCAmelCase ( _lowerCAmelCase : int = 800 , _lowerCAmelCase : int = 600 , _lowerCAmelCase : float = -0.6 , _lowerCAmelCase : float = 0 , _lowerCAmelCase : float = 3.2 , _lowerCAmelCase : int = 50 , _lowerCAmelCase : bool = True , ):
"""simple docstring"""
UpperCAmelCase__ = Image.new("RGB" , (image_width, image_height) )
UpperCAmelCase__ = img.load()
# loop through the image-coordinates
for image_x in range(_lowerCAmelCase ):
for image_y in range(_lowerCAmelCase ):
# determine the figure-coordinates based on the image-coordinates
UpperCAmelCase__ = figure_width / image_width * image_height
UpperCAmelCase__ = figure_center_x + (image_x / image_width - 0.5) * figure_width
UpperCAmelCase__ = figure_center_y + (image_y / image_height - 0.5) * figure_height
UpperCAmelCase__ = get_distance(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
UpperCAmelCase__ = get_color_coded_rgb(_lowerCAmelCase )
else:
UpperCAmelCase__ = get_black_and_white_rgb(_lowerCAmelCase )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
_lowerCAmelCase : int = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show()
| 364 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_lowerCAmelCase : Optional[int] = {
"configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : List[str] = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Tuple = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
_lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 364 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase = {
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 66 |
'''simple docstring'''
import importlib
import inspect
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_lowercase : Tuple = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
_lowercase : str = importlib.util.spec_from_file_location(
"""transformers""",
os.path.join(PATH_TO_TRANSFORMERS, """__init__.py"""),
submodule_search_locations=[PATH_TO_TRANSFORMERS],
)
_lowercase : Optional[Any] = spec.loader.load_module()
_lowercase : List[Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_lowercase : List[Any] = re.compile("""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
_lowercase : List[str] = {
"""CLIPConfigMixin""",
"""DecisionTransformerConfigMixin""",
"""EncoderDecoderConfigMixin""",
"""RagConfigMixin""",
"""SpeechEncoderDecoderConfigMixin""",
"""VisionEncoderDecoderConfigMixin""",
"""VisionTextDualEncoderConfigMixin""",
}
def lowerCamelCase__ ( ):
'''simple docstring'''
UpperCAmelCase = []
for config_class in list(CONFIG_MAPPING.values() ):
UpperCAmelCase = False
# source code of `config_class`
UpperCAmelCase = inspect.getsource(A )
UpperCAmelCase = _re_checkpoint.findall(A )
for checkpoint in checkpoints:
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
UpperCAmelCase , UpperCAmelCase = checkpoint
# verify the checkpoint name corresponds to the checkpoint link
UpperCAmelCase = f"""https://huggingface.co/{ckpt_name}"""
if ckpt_link == ckpt_link_from_name:
UpperCAmelCase = True
break
UpperCAmelCase = config_class.__name__
if not checkpoint_found and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(A )
if len(A ) > 0:
UpperCAmelCase = '''\n'''.join(sorted(A ) )
raise ValueError(f"""The following configurations don't contain any valid checkpoint:\n{message}""" )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints()
| 210 | 0 |
'''simple docstring'''
import gc
import unittest
from transformers import CTRLConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
)
class _UpperCamelCase :
'''simple docstring'''
def __init__( self : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Dict=1_4 , _lowerCAmelCase : Any=7 , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : str=True , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Optional[Any]=9_9 , _lowerCAmelCase : Optional[Any]=3_2 , _lowerCAmelCase : Tuple=5 , _lowerCAmelCase : str=4 , _lowerCAmelCase : str=3_7 , _lowerCAmelCase : int="gelu" , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : List[str]=5_1_2 , _lowerCAmelCase : List[str]=1_6 , _lowerCAmelCase : Optional[int]=2 , _lowerCAmelCase : List[Any]=0.02 , _lowerCAmelCase : List[Any]=3 , _lowerCAmelCase : Any=4 , _lowerCAmelCase : Optional[Any]=None , ):
'''simple docstring'''
__lowercase =parent
__lowercase =batch_size
__lowercase =seq_length
__lowercase =is_training
__lowercase =use_token_type_ids
__lowercase =use_input_mask
__lowercase =use_labels
__lowercase =use_mc_token_ids
__lowercase =vocab_size
__lowercase =hidden_size
__lowercase =num_hidden_layers
__lowercase =num_attention_heads
__lowercase =intermediate_size
__lowercase =hidden_act
__lowercase =hidden_dropout_prob
__lowercase =attention_probs_dropout_prob
__lowercase =max_position_embeddings
__lowercase =type_vocab_size
__lowercase =type_sequence_label_size
__lowercase =initializer_range
__lowercase =num_labels
__lowercase =num_choices
__lowercase =scope
__lowercase =self.vocab_size - 1
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__lowercase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
__lowercase =None
if self.use_input_mask:
__lowercase =random_attention_mask([self.batch_size, self.seq_length])
__lowercase =None
if self.use_token_type_ids:
__lowercase =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
__lowercase =None
if self.use_mc_token_ids:
__lowercase =ids_tensor([self.batch_size, self.num_choices] , self.seq_length)
__lowercase =None
__lowercase =None
__lowercase =None
if self.use_labels:
__lowercase =ids_tensor([self.batch_size] , self.type_sequence_label_size)
__lowercase =ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
__lowercase =ids_tensor([self.batch_size] , self.num_choices)
__lowercase =self.get_config()
__lowercase =ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2)
return (
config,
input_ids,
input_mask,
head_mask,
token_type_ids,
mc_token_ids,
sequence_labels,
token_labels,
choice_labels,
)
def __lowerCamelCase ( self : str):
'''simple docstring'''
return CTRLConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
def __lowerCamelCase ( self : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , *_lowerCAmelCase : str):
'''simple docstring'''
__lowercase =CTRLModel(config=_lowerCAmelCase)
model.to(_lowerCAmelCase)
model.eval()
model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , head_mask=_lowerCAmelCase)
model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase)
__lowercase =model(_lowerCAmelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
self.parent.assertEqual(len(result.past_key_values) , config.n_layer)
def __lowerCamelCase ( self : Any , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , *_lowerCAmelCase : List[Any]):
'''simple docstring'''
__lowercase =CTRLLMHeadModel(_lowerCAmelCase)
model.to(_lowerCAmelCase)
model.eval()
__lowercase =model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def __lowerCamelCase ( self : int):
'''simple docstring'''
__lowercase =self.prepare_config_and_inputs()
( __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase ) =config_and_inputs
__lowercase ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'head_mask': head_mask}
return config, inputs_dict
def __lowerCamelCase ( self : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : Dict , *_lowerCAmelCase : Optional[int]):
'''simple docstring'''
__lowercase =self.num_labels
__lowercase =CTRLForSequenceClassification(_lowerCAmelCase)
model.to(_lowerCAmelCase)
model.eval()
__lowercase =ids_tensor([self.batch_size] , self.type_sequence_label_size)
__lowercase =model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
@require_torch
class _UpperCamelCase ( A , A , A , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = (CTRLModel, CTRLLMHeadModel, CTRLForSequenceClassification) if is_torch_available() else ()
lowerCAmelCase__ = (CTRLLMHeadModel,) if is_torch_available() else ()
lowerCAmelCase__ = (
{
"""feature-extraction""": CTRLModel,
"""text-classification""": CTRLForSequenceClassification,
"""text-generation""": CTRLLMHeadModel,
"""zero-shot""": CTRLForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def __lowerCamelCase ( self : int , _lowerCAmelCase : Tuple , _lowerCAmelCase : str , _lowerCAmelCase : List[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any]):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `CTRLConfig` was never used in pipeline tests, either because of a missing checkpoint or because a tiny
# config could not be created.
return True
return False
def __lowerCamelCase ( self : List[str]):
'''simple docstring'''
__lowercase =CTRLModelTester(self)
__lowercase =ConfigTester(self , config_class=_lowerCAmelCase , n_embd=3_7)
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self : Optional[int]):
'''simple docstring'''
self.config_tester.run_common_tests()
def __lowerCamelCase ( self : int):
'''simple docstring'''
__lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_ctrl_model(*_lowerCAmelCase)
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
__lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_lowerCAmelCase)
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def __lowerCamelCase ( self : Dict):
'''simple docstring'''
pass
@slow
def __lowerCamelCase ( self : Union[str, Any]):
'''simple docstring'''
for model_name in CTRL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowercase =CTRLModel.from_pretrained(_lowerCAmelCase)
self.assertIsNotNone(_lowerCAmelCase)
@unittest.skip('The model doesn\'t support left padding') # and it's not used enough to be worth fixing :)
def __lowerCamelCase ( self : int):
'''simple docstring'''
pass
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCamelCase ( self : str):
'''simple docstring'''
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
torch.cuda.empty_cache()
@slow
def __lowerCamelCase ( self : Tuple):
'''simple docstring'''
__lowercase =CTRLLMHeadModel.from_pretrained('ctrl')
model.to(_lowerCAmelCase)
__lowercase =torch.tensor(
[[1_1_8_5_9, 0, 1_6_1_1, 8]] , dtype=torch.long , device=_lowerCAmelCase) # Legal the president is
__lowercase =[
1_1_8_5_9,
0,
1_6_1_1,
8,
5,
1_5_0,
2_6_4_4_9,
2,
1_9,
3_4_8,
4_6_9,
3,
2_5_9_5,
4_8,
2_0_7_4_0,
2_4_6_5_3_3,
2_4_6_5_3_3,
1_9,
3_0,
5,
] # Legal the president is a good guy and I don't want to lose my job. \n \n I have a
__lowercase =model.generate(_lowerCAmelCase , do_sample=_lowerCAmelCase)
self.assertListEqual(output_ids[0].tolist() , _lowerCAmelCase)
| 700 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase = {
"""configuration_mask2former""": [
"""MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Mask2FormerConfig""",
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ["""Mask2FormerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
"""MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Mask2FormerForUniversalSegmentation""",
"""Mask2FormerModel""",
"""Mask2FormerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 454 | 0 |
def lowercase ( __A : int = 200_0000 ) -> int:
'''simple docstring'''
snake_case : List[str] = [0 for i in range(n + 1 )]
snake_case : Optional[Any] = 1
snake_case : Tuple = 1
for i in range(2 , int(n**0.5 ) + 1 ):
if primality_list[i] == 0:
for j in range(i * i , n + 1 , __A ):
snake_case : Optional[int] = 1
snake_case : List[str] = 0
for i in range(__A ):
if primality_list[i] == 0:
sum_of_primes += i
return sum_of_primes
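# Sanity check with a small bound (an illustrative call, assuming the two
# initial assignments above mark indices 0 and 1 as non-prime, as in the
# standard sieve): solution(10) == 2 + 3 + 5 + 7 == 17.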
if __name__ == "__main__":
print(f'''{solution() = }''')
| 36 |
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
__magic_name__ = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
__magic_name__ = json.load(f)
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self , lowerCAmelCase__):
return FSMTTokenizer.from_pretrained(lowerCAmelCase__)
def snake_case_ ( self , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = FSMTForConditionalGeneration.from_pretrained(lowerCAmelCase__).to(lowerCAmelCase__)
if torch_device == "cuda":
model.half()
return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
])
@slow
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__):
# note: this test does not measure peak performance, since it only evaluates a
# small batch, but that should be enough to detect a regression in output quality
__SCREAMING_SNAKE_CASE = f"facebook/wmt19-{pair}"
__SCREAMING_SNAKE_CASE = self.get_tokenizer(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = self.get_model(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = bleu_data[pair]["""src"""]
__SCREAMING_SNAKE_CASE = bleu_data[pair]["""tgt"""]
__SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase__ , return_tensors="""pt""" , truncation=lowerCAmelCase__ , padding="""longest""").to(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = model.generate(
input_ids=batch.input_ids , num_beams=8 , )
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(
lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ , clean_up_tokenization_spaces=lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = calculate_bleu(lowerCAmelCase__ , lowerCAmelCase__)
print(lowerCAmelCase__)
self.assertGreaterEqual(scores["""bleu"""] , lowerCAmelCase__)
| 155 | 0 |
"""simple docstring"""
class _lowerCAmelCase :
def __init__( self : List[Any] ) -> List[Any]:
"""simple docstring"""
lowercase = {}
def _lowerCAmelCase ( self : List[str] ) -> None:
"""simple docstring"""
print(self.vertex )
for i in self.vertex:
print(a , ''' -> ''' , ''' -> '''.join([str(a ) for j in self.vertex[i]] ) )
def _lowerCAmelCase ( self : Optional[int] , a : int , a : int ) -> None:
"""simple docstring"""
# check if vertex is already present,
if from_vertex in self.vertex:
self.vertex[from_vertex].append(a )
else:
# else make a new vertex
lowercase = [to_vertex]
def _lowerCAmelCase ( self : int ) -> None:
"""simple docstring"""
# visited array for storing already visited nodes
lowercase = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(a , a )
def _lowerCAmelCase ( self : Dict , a : int , a : list ) -> None:
"""simple docstring"""
# mark start vertex as visited
lowercase = True
print(a , end=''' ''' )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(a , a )
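# For comparison, an iterative depth-first traversal with an explicit stack (a
# sketch added for illustration, not part of the original class):
#
#     def dfs_iterative(graph, start):
#         visited, stack = set(), [start]
#         while stack:
#             node = stack.pop()
#             if node in visited:
#                 continue
#             visited.add(node)
#             print(node, end=" ")
#             stack.extend(reversed(graph.vertex.get(node, [])))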
if __name__ == "__main__":
__lowerCAmelCase = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('''DFS:''')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3 | 396 |
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def A_ ( __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] ):
lowercase = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
lowercase = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw ).convert('''RGB''' )
lowercase = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73) , (0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11) ),
] )
lowercase = transform(__UpperCamelCase ).unsqueeze(0 ).to(__UpperCamelCase )
return image
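# Note: the mean/std constants above are the standard CLIP image-normalization
# values, and the returned tensor is batched to shape (1, 3, image_size, image_size).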
def A_ ( __UpperCamelCase : str ):
if "visual_encoder" in key:
lowercase = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , __UpperCamelCase )
if "blocks" in key:
lowercase = re.sub(R'''blocks''' , '''layers''' , __UpperCamelCase )
if "attn" in key:
lowercase = re.sub(R'''attn''' , '''self_attn''' , __UpperCamelCase )
if "norm1" in key:
lowercase = re.sub(R'''norm1''' , '''layer_norm1''' , __UpperCamelCase )
if "norm2" in key:
lowercase = re.sub(R'''norm2''' , '''layer_norm2''' , __UpperCamelCase )
if "encoder.norm" in key:
lowercase = re.sub(R'''encoder.norm''' , '''post_layernorm''' , __UpperCamelCase )
if "encoder.patch_embed.proj" in key:
lowercase = re.sub(R'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , __UpperCamelCase )
if "encoder.pos_embed" in key:
lowercase = re.sub(R'''encoder.pos_embed''' , '''embeddings.position_embedding''' , __UpperCamelCase )
if "encoder.cls_token" in key:
lowercase = re.sub(R'''encoder.cls_token''' , '''embeddings.class_embedding''' , __UpperCamelCase )
if "self_attn" in key:
lowercase = re.sub(R'''self_attn.proj''' , '''self_attn.projection''' , __UpperCamelCase )
return key
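# Example of the combined renaming rules (traced through the substitutions above):
#
#     rename_key("visual_encoder.blocks.0.attn.qkv.weight")
#     # -> "vision_model.encoder.layers.0.self_attn.qkv.weight"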
@torch.no_grad()
def A_ ( __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int]=None ):
if config_path is not None:
lowercase = BlipConfig.from_pretrained(__UpperCamelCase )
else:
lowercase = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} )
lowercase = BlipForConditionalGeneration(__UpperCamelCase ).eval()
lowercase = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
lowercase = blip_decoder(pretrained=__UpperCamelCase , image_size=3_84 , vit='''base''' )
lowercase = pt_model.eval()
lowercase = pt_model.state_dict()
for key in modified_state_dict.copy():
lowercase = modified_state_dict.pop(__UpperCamelCase )
lowercase = rename_key(__UpperCamelCase )
lowercase = value
hf_model.load_state_dict(__UpperCamelCase )
lowercase = 3_84
lowercase = load_demo_image(image_size=__UpperCamelCase , device='''cpu''' )
lowercase = BertTokenizer.from_pretrained('''bert-base-uncased''' )
lowercase = tokenizer(['''a picture of'''] ).input_ids
lowercase = hf_model.generate(__UpperCamelCase , __UpperCamelCase )
assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
lowercase = hf_model.generate(__UpperCamelCase )
assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(__UpperCamelCase )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
lowercase = (
'''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
)
lowercase = blip_vqa(pretrained=__UpperCamelCase , image_size=__UpperCamelCase , vit='''base''' )
vqa_model.eval()
lowercase = vqa_model.state_dict()
for key in modified_state_dict.copy():
lowercase = modified_state_dict.pop(__UpperCamelCase )
lowercase = rename_key(__UpperCamelCase )
lowercase = value
lowercase = BlipForQuestionAnswering(__UpperCamelCase )
hf_vqa_model.load_state_dict(__UpperCamelCase )
lowercase = ['''How many dogs are in this image?''']
lowercase = tokenizer(__UpperCamelCase , return_tensors='''pt''' ).input_ids
lowercase = hf_vqa_model.generate(__UpperCamelCase , __UpperCamelCase )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
lowercase = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
lowercase = blip_itm(pretrained=__UpperCamelCase , image_size=__UpperCamelCase , vit='''base''' )
itm_model.eval()
lowercase = itm_model.state_dict()
for key in modified_state_dict.copy():
lowercase = modified_state_dict.pop(__UpperCamelCase )
lowercase = rename_key(__UpperCamelCase )
lowercase = value
lowercase = BlipForImageTextRetrieval(__UpperCamelCase )
lowercase = ['''A picture of a woman with a dog sitting in a beach''']
lowercase = tokenizer(
__UpperCamelCase , return_tensors='''pt''' , padding='''max_length''' , truncation=__UpperCamelCase , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(__UpperCamelCase )
hf_itm_model.eval()
lowercase = hf_itm_model(__UpperCamelCase , __UpperCamelCase , use_itm_head=__UpperCamelCase )
lowercase = hf_itm_model(__UpperCamelCase , __UpperCamelCase , use_itm_head=__UpperCamelCase )
assert out[0].item() == 0.21_10_68_74_94_27_79_54
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_56_98_84_53_86_50_51_27
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
__lowerCAmelCase = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 396 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)

        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)

        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        raw_speech = floats_list((3, 1000))

        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names[2:],
            feature_extractor.model_input_names,
            msg="`processor` and `feature_extractor` model input names do not match",
        )
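# Illustrative usage sketch (editorial example): pairing the processor tested
# above with ClapModel for audio/text similarity. The checkpoint and the
# 48 kHz sampling rate are assumptions based on the public model card.
import numpy as np
import torch
from transformers import ClapModel, ClapProcessor

clap_model = ClapModel.from_pretrained("laion/clap-htsat-unfused")
clap_processor = ClapProcessor.from_pretrained("laion/clap-htsat-unfused")

audio = np.random.randn(48_000).astype(np.float32)  # stand-in for one second of real audio
clap_inputs = clap_processor(
    text=["a dog barking", "a piano melody"], audios=audio, sampling_rate=48_000, return_tensors="pt"
)
with torch.no_grad():
    clap_outputs = clap_model(**clap_inputs)
print(clap_outputs.logits_per_audio.softmax(dim=-1))  # audio-to-text similarity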
| 104 | from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_pegasus_x': ['PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PegasusXConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
'PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST',
'PegasusXForConditionalGeneration',
'PegasusXModel',
'PegasusXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
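# How the lazy-module indirection above behaves (editorial sketch, not part of
# the package): attribute access resolves and caches the real import, so the
# top-level package import stays cheap. `TinyLazyModule` is a made-up stand-in.
import importlib
import types


class TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported name back to the submodule that defines it.
        self._class_to_module = {cls: mod for mod, classes in import_structure.items() for cls in classes}

    def __getattr__(self, name):
        # Import the submodule only on first access, then cache the attribute.
        module = importlib.import_module("." + self._class_to_module[name], self.__name__)
        value = getattr(module, name)
        setattr(self, name, value)
        return value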
| 486 | 0 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
'''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VivitModel''',
'''VivitPreTrainedModel''',
'''VivitForVideoClassification''',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
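# Illustrative usage sketch for the classes exported above (editorial example;
# the checkpoint name and the 32-frame dummy clip are assumptions):
import numpy as np
import torch
from transformers import VivitForVideoClassification, VivitImageProcessor

vivit_processor = VivitImageProcessor.from_pretrained("google/vivit-b-16x2-kinetics400")
vivit_model = VivitForVideoClassification.from_pretrained("google/vivit-b-16x2-kinetics400")

video = list(np.random.randint(0, 256, (32, 224, 224, 3), dtype=np.uint8))  # 32 dummy RGB frames
vivit_inputs = vivit_processor(video, return_tensors="pt")
with torch.no_grad():
    logits = vivit_model(**vivit_inputs).logits
print(vivit_model.config.id2label[logits.argmax(-1).item()])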
| 48 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import add_start_docstrings
RAG_CONFIG_DOC = r'''
[`RagConfig`] stores the configuration of a *RagModel*. Configuration objects inherit from [`PretrainedConfig`] and
can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information.
Args:
title_sep (`str`, *optional*, defaults to `" / "`):
Separator inserted between the title and the text of the retrieved document when calling [`RagRetriever`].
doc_sep (`str`, *optional*, defaults to `" // "`):
Separator inserted between the text of the retrieved document and the original input when calling
[`RagRetriever`].
n_docs (`int`, *optional*, defaults to 5):
Number of documents to retrieve.
max_combined_length (`int`, *optional*, defaults to 300):
Max length of contextualized input returned by [`~RagRetriever.__call__`].
retrieval_vector_size (`int`, *optional*, defaults to 768):
Dimensionality of the document embeddings indexed by [`RagRetriever`].
retrieval_batch_size (`int`, *optional*, defaults to 8):
Retrieval batch size, defined as the number of queries issues concurrently to the faiss index encapsulated
[`RagRetriever`].
dataset (`str`, *optional*, defaults to `"wiki_dpr"`):
A dataset identifier of the indexed dataset in HuggingFace Datasets (list all available datasets and ids
using `datasets.list_datasets()`).
dataset_split (`str`, *optional*, defaults to `"train"`)
Which split of the `dataset` to load.
index_name (`str`, *optional*, defaults to `"compressed"`)
The index name of the index associated with the `dataset`. One can choose between `"legacy"`, `"exact"` and
`"compressed"`.
index_path (`str`, *optional*)
The path to the serialized faiss index on disk.
passages_path (`str`, *optional*):
A path to text passages compatible with the faiss index. Required if using
[`~models.rag.retrieval_rag.LegacyIndex`]
use_dummy_dataset (`bool`, *optional*, defaults to `False`)
Whether to load a "dummy" variant of the dataset specified by `dataset`.
label_smoothing (`float`, *optional*, defaults to 0.0):
Only relevant if `return_loss` is set to `True`. Controls the `epsilon` parameter value for label smoothing
in the loss calculation. If set to 0, no label smoothing is performed.
do_marginalize (`bool`, *optional*, defaults to `False`):
If `True`, the logits are marginalized over all documents by making use of
`torch.nn.functional.log_softmax`.
reduce_loss (`bool`, *optional*, defaults to `False`):
Whether or not to reduce the NLL loss using the `torch.Tensor.sum` operation.
do_deduplication (`bool`, *optional*, defaults to `True`):
Whether or not to deduplicate the generations from different context documents for a given input. Has to be
set to `False` if used while training with distributed backend.
exclude_bos_score (`bool`, *optional*, defaults to `False`):
Whether or not to disregard the BOS token when computing the loss.
output_retrieved(`bool`, *optional*, defaults to `False`):
If set to `True`, `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
`context_attention_mask` are returned. See returned tensors for more detail.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
'''
@add_start_docstrings(RAG_CONFIG_DOC)
class RagConfig(PretrainedConfig):
    model_type = "rag"
    is_composition = True

    def __init__(
        self,
        vocab_size=None,
        is_encoder_decoder=True,
        prefix=None,
        bos_token_id=None,
        pad_token_id=None,
        eos_token_id=None,
        decoder_start_token_id=None,
        title_sep=" / ",
        doc_sep=" // ",
        n_docs=5,
        max_combined_length=300,
        retrieval_vector_size=768,
        retrieval_batch_size=8,
        dataset="wiki_dpr",
        dataset_split="train",
        index_name="compressed",
        index_path=None,
        passages_path=None,
        use_dummy_dataset=False,
        reduce_loss=False,
        label_smoothing=0.0,
        do_deduplication=True,
        exclude_bos_score=False,
        do_marginalize=False,
        output_retrieved=False,
        use_cache=True,
        forced_eos_token_id=None,
        **kwargs,
    ):
        super().__init__(
            bos_token_id=bos_token_id,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            prefix=prefix,
            vocab_size=vocab_size,
            **kwargs,
        )
        assert (
            "question_encoder" in kwargs and "generator" in kwargs
        ), "Config has to be initialized with question_encoder and generator config"
        question_encoder_config = kwargs.pop("question_encoder")
        question_encoder_model_type = question_encoder_config.pop("model_type")
        decoder_config = kwargs.pop("generator")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.question_encoder = AutoConfig.for_model(question_encoder_model_type, **question_encoder_config)
        self.generator = AutoConfig.for_model(decoder_model_type, **decoder_config)

        self.reduce_loss = reduce_loss
        self.label_smoothing = label_smoothing
        self.exclude_bos_score = exclude_bos_score
        self.do_marginalize = do_marginalize
        self.title_sep = title_sep
        self.doc_sep = doc_sep
        self.n_docs = n_docs
        self.max_combined_length = max_combined_length
        self.dataset = dataset
        self.dataset_split = dataset_split
        self.index_name = index_name
        self.retrieval_vector_size = retrieval_vector_size
        self.retrieval_batch_size = retrieval_batch_size
        self.passages_path = passages_path
        self.index_path = index_path
        self.use_dummy_dataset = use_dummy_dataset
        self.output_retrieved = output_retrieved
        self.do_deduplication = do_deduplication
        self.use_cache = use_cache

        if self.forced_eos_token_id is None:
            self.forced_eos_token_id = getattr(self.generator, "forced_eos_token_id", None)

    @classmethod
    def from_question_encoder_generator_configs(cls, question_encoder_config, generator_config, **kwargs):
        return cls(question_encoder=question_encoder_config.to_dict(), generator=generator_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["question_encoder"] = self.question_encoder.to_dict()
        output["generator"] = self.generator.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
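# Illustrative composition example (editorial addition): building a RagConfig
# from two sub-configs with the classmethod defined above. The checkpoint names
# are the usual public RAG components and are assumptions here.
from transformers import AutoConfig, RagConfig

question_encoder_config = AutoConfig.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
generator_config = AutoConfig.from_pretrained("facebook/bart-large")

rag_config = RagConfig.from_question_encoder_generator_configs(
    question_encoder_config, generator_config, n_docs=5, index_name="compressed"
)
print(rag_config.generator.model_type)  # "bart"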
| 48 | 1 |
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
    df = pd.read_csv("sample_data.csv", header=None)
    len_data = df.shape[:1][0]
    # If you're using some other dataset input the target column
    actual_data = df.iloc[:, 1:2]
    actual_data = actual_data.values.reshape(len_data, 1)
    actual_data = MinMaxScaler().fit_transform(actual_data)
    look_back = 10
    forward_days = 5
    periods = 20

    division = len_data - periods * look_back
    train_data = actual_data[:division]
    test_data = actual_data[division - look_back :]
    train_x, train_y = [], []
    test_x, test_y = [], []

    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    x_train = np.array(train_x)
    x_test = np.array(test_x)
    y_train = np.array([list(i.ravel()) for i in train_y])
    y_test = np.array([list(i.ravel()) for i in test_y])

    model = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss="mean_squared_error", optimizer="adam")
    history = model.fit(x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4)
    pred = model.predict(x_test)
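    # Illustrative follow-up (editorial addition): score the predictions the
    # script stops at. The RMSE is computed in the scaled [0, 1] space produced
    # by the MinMaxScaler above.
    rmse = np.sqrt(np.mean((pred - y_test) ** 2))
    print(f"RMSE on the held-out windows: {rmse:.4f}")
    # To report errors in original units, keep the fitted scaler instance and
    # apply scaler.inverse_transform(pred.reshape(-1, 1)) before comparing.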
| 87 | import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
EN_CODE = 50003
PYTHON_CODE = 50002
@require_sentencepiece
@require_tokenizers
class PLBartTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PLBartTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_base_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base", keep_accents=True)
A_ : Union[str, Any] = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowercase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
A_ : List[str] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
A_ : List[Any] = tokenizer.convert_tokens_to_ids(lowercase )
self.assertListEqual(
lowercase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
A_ : Tuple = tokenizer.convert_ids_to_tokens(lowercase )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
A_ : Optional[Any] = tokenizer.vocab_size
A_ : Dict = [tokenizer.convert_ids_to_tokens(lowercase ) for x in range(end - 4 , lowercase )]
self.assertListEqual(lowercase , ['__java__', '__python__', '__en_XX__', '<mask>'] )
A_ : Dict = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
A_ : Optional[Any] = tokenizer(lowercase ).input_ids
self.assertEqual(
tokenizer.decode(lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase ) , lowercase , )
    def test_full_multi_tokenizer(self):
        tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="multi", keep_accents=True)
A_ : int = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowercase , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowercase ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
A_ : List[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
A_ : int = tokenizer.convert_tokens_to_ids(lowercase )
self.assertListEqual(
lowercase , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
A_ : Tuple = tokenizer.convert_ids_to_tokens(lowercase )
self.assertListEqual(
lowercase , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
] , )
A_ : Any = tokenizer.vocab_size
A_ : int = [tokenizer.convert_ids_to_tokens(lowercase ) for x in range(end - 7 , lowercase )]
self.assertListEqual(
lowercase , ['__java__', '__python__', '__en_XX__', '__javascript__', '__php__', '__ruby__', '__go__'] )
A_ : str = 'java.lang.Exception, python.lang.Exception, javascript, php, ruby, go'
A_ : Dict = tokenizer(lowercase ).input_ids
self.assertEqual(
tokenizer.decode(lowercase , skip_special_tokens=lowercase , clean_up_tokenization_spaces=lowercase ) , lowercase , )
@require_torch
@require_sentencepiece
@require_tokenizers
class PLBartPythonEnIntegrationTest(unittest.TestCase):
    checkpoint_name = "uclanlp/plbart-python-en_XX"
    src_text = [
        "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])",
        "def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])",
    ]
    tgt_text = [
        "Returns the maximum value of a b c.",
        "Sums the values of a b c.",
    ]
    expected_src_tokens = [
1_3_4,
5_4_5_2,
3_3_4_6_0,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
9_8_8,
2_0,
3_3_4_5_6,
1_9,
3_3_4_5_6,
7_7_1,
3_9,
4_2_5_8,
8_8_9,
3_3_1_8,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
2_4_7_1,
2,
PYTHON_CODE,
]
@classmethod
    def setUpClass(cls):
        cls.tokenizer: PLBartTokenizer = PLBartTokenizer.from_pretrained(
            cls.checkpoint_name, language_codes="base", src_lang="python", tgt_lang="en_XX"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__java__'] , 5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__python__'] , 5_0_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['__en_XX__'] , 5_0_0_0_3 )
    def test_tokenizer_batch_encode_plus(self):
A_ : int = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowercase )
    def test_tokenizer_decode_ignores_language_codes(self):
self.assertIn(lowercase , self.tokenizer.all_special_ids )
A_ : List[Any] = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
A_ : Tuple = self.tokenizer.decode(lowercase , skip_special_tokens=lowercase )
A_ : List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowercase )
self.assertEqual(lowercase , lowercase )
self.assertNotIn(self.tokenizer.eos_token , lowercase )
    def test_tokenizer_truncation(self):
A_ : Any = ['def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])' * 2_0]
self.assertIsInstance(src_text[0] , lowercase )
A_ : int = 1_0
A_ : Dict = self.tokenizer(lowercase , max_length=lowercase , truncation=lowercase ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , lowercase )
self.assertEqual(len(lowercase ) , lowercase )
    def test_mask_token(self):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', '__java__'] ) , [5_0_0_0_4, 5_0_0_0_1] )
    def test_special_tokens_unaffected_by_save_and_load(self):
A_ : str = tempfile.mkdtemp()
A_ : Optional[Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowercase )
A_ : Union[str, Any] = PLBartTokenizer.from_pretrained(lowercase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowercase )
@require_torch
    def test_batch_fairseq_parity(self):
A_ : Union[str, Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowercase , return_tensors='pt' )
A_ : List[str] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , lowercase )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
    def test_python_en_tokenizer_prepare_batch(self):
A_ : Optional[int] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowercase , truncation=lowercase , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
A_ : str = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowercase , lowercase )
self.assertEqual((2, 2_6) , batch.input_ids.shape )
self.assertEqual((2, 2_6) , batch.attention_mask.shape )
A_ : Optional[int] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowercase )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
    def test_seq2seq_max_length(self):
A_ : str = self.tokenizer(self.src_text , padding=lowercase , truncation=lowercase , max_length=3 , return_tensors='pt' )
A_ : Optional[Any] = self.tokenizer(
text_target=self.tgt_text , padding=lowercase , truncation=lowercase , max_length=1_0 , return_tensors='pt' )
A_ : Optional[int] = targets['input_ids']
A_ : int = shift_tokens_right(lowercase , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
    def test_tokenizer_translation(self):
A_ : List[str] = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='java' )
self.assertEqual(
nested_simplify(lowercase ) , {
# A, test, EOS, en_XX
'input_ids': [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
'attention_mask': [[1, 1, 1, 1]],
# java
'forced_bos_token_id': 5_0_0_0_1,
} , )
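# Illustrative end-to-end use of the tokenizer exercised above, together with
# the matching seq2seq model (editorial example; generation settings assumed):
from transformers import PLBartForConditionalGeneration, PLBartTokenizer

plbart_tokenizer = PLBartTokenizer.from_pretrained(
    "uclanlp/plbart-python-en_XX", src_lang="python", tgt_lang="en_XX"
)
plbart_model = PLBartForConditionalGeneration.from_pretrained("uclanlp/plbart-python-en_XX")

code_snippet = "def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])"
plbart_inputs = plbart_tokenizer(code_snippet, return_tensors="pt")
summary_ids = plbart_model.generate(
    **plbart_inputs, decoder_start_token_id=plbart_tokenizer.lang_code_to_id["en_XX"]
)
print(plbart_tokenizer.batch_decode(summary_ids, skip_special_tokens=True)[0])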
| 558 | 0 |
import multiprocessing
import time
from arguments import PretokenizationArguments
from datasets import load_dataset
from transformers import AutoTokenizer, HfArgumentParser
def tokenize(example):
    output = {}
    output["input_ids"] = tokenizer(example["content"], truncation=False)["input_ids"]
    output["ratio_char_token"] = len(example["content"]) / len(output["input_ids"])
    return output


parser = HfArgumentParser(PretokenizationArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Dataset loaded in {time.time()-t_start:.2f}s")

t_start = time.time()
ds = ds.map(
    tokenize,
    num_proc=args.num_workers,
    remove_columns=[
        "repo_name",
        "path",
        "copies",
        "size",
        "content",
        "license",
        "hash",
        "line_mean",
        "line_max",
        "alpha_frac",
        "autogenerated",
    ],
)
print(f"Dataset tokenized in {time.time()-t_start:.2f}s")

t_start = time.time()
ds.push_to_hub(args.tokenized_data_repo)
print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
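# Illustrative follow-up (editorial addition): the `ratio_char_token` column
# computed in `tokenize` is useful for spotting poorly tokenized files; the
# 2.5 characters-per-token threshold below is an arbitrary assumption.
suspicious = ds.filter(lambda example: example["ratio_char_token"] < 2.5)
print(f"{len(suspicious)} of {len(ds)} files fall below 2.5 characters per token")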
| 586 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
"tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
"M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
"M2M100ForConditionalGeneration",
"M2M100Model",
"M2M100PreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
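# Illustrative translation example with the classes exported above (editorial
# addition; the public 418M checkpoint is an assumption):
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

m2m_tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr")
m2m_model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")

encoded_fr = m2m_tokenizer("La vie est comme une boîte de chocolat.", return_tensors="pt")
generated = m2m_model.generate(**encoded_fr, forced_bos_token_id=m2m_tokenizer.get_lang_id("en"))
print(m2m_tokenizer.batch_decode(generated, skip_special_tokens=True)[0])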
| 586 | 1 |
import argparse
import os
import torch
from diffusers import (
    CMStochasticIterativeScheduler,
    ConsistencyModelPipeline,
    UNet2DModel,
)
TEST_UNET_CONFIG = {
"sample_size": 32,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": 1_000,
"block_out_channels": [32, 64],
"attention_head_dim": 8,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
IMAGENET_64_UNET_CONFIG = {
"sample_size": 64,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 3,
"num_class_embeds": 1_000,
"block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "scale_shift",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
LSUN_256_UNET_CONFIG = {
"sample_size": 256,
"in_channels": 3,
"out_channels": 3,
"layers_per_block": 2,
"num_class_embeds": None,
"block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
"attention_head_dim": 64,
"down_block_types": [
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"ResnetDownsampleBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
"AttnDownBlock2D",
],
"up_block_types": [
"AttnUpBlock2D",
"AttnUpBlock2D",
"AttnUpBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
"ResnetUpsampleBlock2D",
],
"resnet_time_scale_shift": "default",
"upsample_type": "resnet",
"downsample_type": "resnet",
}
CD_SCHEDULER_CONFIG = {
"num_train_timesteps": 40,
"sigma_min": 0.0_02,
"sigma_max": 80.0,
}
CT_IMAGENET_64_SCHEDULER_CONFIG = {
"num_train_timesteps": 201,
"sigma_min": 0.0_02,
"sigma_max": 80.0,
}
CT_LSUN_256_SCHEDULER_CONFIG = {
"num_train_timesteps": 151,
"sigma_min": 0.0_02,
"sigma_max": 80.0,
}
def strabool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]

    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]

    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)

    return new_checkpoint
def con_pt_to_diffuser(checkpoint_path, unet_config):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    if unet_config["num_class_embeds"] is not None:
        new_checkpoint["class_embedding.weight"] = checkpoint["label_emb.weight"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    down_block_types = unet_config["down_block_types"]
    layers_per_block = unet_config["layers_per_block"]
    attention_head_dim = unet_config["attention_head_dim"]
    channels_list = unet_config["block_out_channels"]
    current_layer = 1
    prev_channels = channels_list[0]

    for i, layer_type in enumerate(down_block_types):
        current_channels = channels_list[i]
        downsample_block_has_skip = current_channels != prev_channels
        if layer_type == "ResnetDownsampleBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                current_layer += 1

        elif layer_type == "AttnDownBlock2D":
            for j in range(layers_per_block):
                new_prefix = f"down_blocks.{i}.resnets.{j}"
                old_prefix = f"input_blocks.{current_layer}.0"
                has_skip = True if j == 0 and downsample_block_has_skip else False
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=has_skip)
                new_prefix = f"down_blocks.{i}.attentions.{j}"
                old_prefix = f"input_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

        if i != len(down_block_types) - 1:
            new_prefix = f"down_blocks.{i}.downsamplers.0"
            old_prefix = f"input_blocks.{current_layer}.0"
            new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
            current_layer += 1

        prev_channels = current_channels

    # hardcoded the mid-block for now
    new_prefix = "mid_block.resnets.0"
    old_prefix = "middle_block.0"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
    new_prefix = "mid_block.attentions.0"
    old_prefix = "middle_block.1"
    new_checkpoint = convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim)
    new_prefix = "mid_block.resnets.1"
    old_prefix = "middle_block.2"
    new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    current_layer = 0
    up_block_types = unet_config["up_block_types"]

    for i, layer_type in enumerate(up_block_types):
        if layer_type == "ResnetUpsampleBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.1"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)
        elif layer_type == "AttnUpBlock2D":
            for j in range(layers_per_block + 1):
                new_prefix = f"up_blocks.{i}.resnets.{j}"
                old_prefix = f"output_blocks.{current_layer}.0"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=True)
                new_prefix = f"up_blocks.{i}.attentions.{j}"
                old_prefix = f"output_blocks.{current_layer}.1"
                new_checkpoint = convert_attention(
                    checkpoint, new_checkpoint, old_prefix, new_prefix, attention_head_dim
                )
                current_layer += 1

            if i != len(up_block_types) - 1:
                new_prefix = f"up_blocks.{i}.upsamplers.0"
                old_prefix = f"output_blocks.{current_layer-1}.2"
                new_checkpoint = convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix)

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")

    args = parser.parse_args()
    args.class_cond = strabool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Get U-Net config
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)

    image_unet = UNet2DModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Get scheduler config
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)

    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
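    # Illustrative follow-up (editorial addition): a one-step sampling pass with
    # the freshly assembled pipeline is a cheap sanity check of the conversion.
    sample = consistency_model(batch_size=1, num_inference_steps=1).images[0]
    sample.save("consistency_sample.png")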
| 66 | '''simple docstring'''
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class UNetBlockTesterMixin:
    @property
    def dummy_input(self):
        return self.get_dummy_input()

    @property
    def output_shape(self):
        if self.block_type == "down":
            return (4, 32, 16, 16)
        elif self.block_type == "mid":
            return (4, 32, 32, 32)
        elif self.block_type == "up":
            return (4, 32, 64, 64)

        raise ValueError(f"'{self.block_type}' is not a supported block_type. Set it to 'up', 'mid', or 'down'.")

    def get_dummy_input(
        self,
        include_temb=True,
        include_res_hidden_states_tuple=False,
        include_encoder_hidden_states=False,
        include_skip_sample=False,
    ):
        batch_size = 4
        num_channels = 32
        sizes = (32, 32)

        generator = torch.manual_seed(0)
        device = torch.device(torch_device)
        shape = (batch_size, num_channels) + sizes
        hidden_states = randn_tensor(shape, generator=generator, device=device)
        dummy_input = {"hidden_states": hidden_states}

        if include_temb:
            temb_channels = 128
            dummy_input["temb"] = randn_tensor((batch_size, temb_channels), generator=generator, device=device)

        if include_res_hidden_states_tuple:
            generator_1 = torch.manual_seed(1)
            dummy_input["res_hidden_states_tuple"] = (randn_tensor(shape, generator=generator_1, device=device),)

        if include_encoder_hidden_states:
            dummy_input["encoder_hidden_states"] = floats_tensor((batch_size, 32, 32)).to(torch_device)

        if include_skip_sample:
            dummy_input["skip_sample"] = randn_tensor(((batch_size, 3) + sizes), generator=generator, device=device)

        return dummy_input

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
            "temb_channels": 128,
        }
        if self.block_type == "up":
            init_dict["prev_output_channel"] = 32

        if self.block_type == "mid":
            init_dict.pop("out_channels")

        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self, expected_slice):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        unet_block = self.block_class(**init_dict)
        unet_block.to(torch_device)
        unet_block.eval()

        with torch.no_grad():
            output = unet_block(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        self.assertEqual(output.shape, self.output_shape)

        output_slice = output[0, -1, -3:, -3:]
        expected_slice = torch.tensor(expected_slice).to(torch_device)
        assert torch_all_close(output_slice.flatten(), expected_slice, atol=5e-3)

    @unittest.skipIf(torch_device == "mps", "Training is not supported in mps")
    def test_training(self):
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.block_class(**init_dict)
        model.to(torch_device)
        model.train()
        output = model(**inputs_dict)

        if isinstance(output, Tuple):
            output = output[0]

        device = torch.device(torch_device)
        noise = randn_tensor(output.shape, device=device)
        loss = torch.nn.functional.mse_loss(output, noise)
        loss.backward()
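# Illustrative sketch (editorial addition) of how a concrete test case plugs a
# real diffusers block into the mixin above; the expected-slice numbers are
# placeholders, not verified reference values.
from diffusers.models.unet_2d_blocks import DownBlock2D


class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.0232, -0.9869, 0.8054, 0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
        super().test_output(expected_slice)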
| 546 | 0 |
'''simple docstring'''
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/xprophetnet-large-wiki100-cased": (
        "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"
    ),
}
class XLMProphetNetConfig(PretrainedConfig):
    model_type = "xlm-prophetnet"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "num_encoder_attention_heads",
    }

    def __init__(
        self,
        activation_dropout: Optional[float] = 0.1,
        activation_function: Optional[Union[str, Callable]] = "gelu",
        vocab_size: Optional[int] = 30522,
        hidden_size: Optional[int] = 1024,
        encoder_ffn_dim: Optional[int] = 4096,
        num_encoder_layers: Optional[int] = 12,
        num_encoder_attention_heads: Optional[int] = 16,
        decoder_ffn_dim: Optional[int] = 4096,
        num_decoder_layers: Optional[int] = 12,
        num_decoder_attention_heads: Optional[int] = 16,
        attention_dropout: Optional[float] = 0.1,
        dropout: Optional[float] = 0.1,
        max_position_embeddings: Optional[int] = 512,
        init_std: Optional[float] = 0.02,
        is_encoder_decoder: Optional[bool] = True,
        add_cross_attention: Optional[bool] = True,
        decoder_start_token_id: Optional[int] = 0,
        ngram: Optional[int] = 2,
        num_buckets: Optional[int] = 32,
        relative_max_distance: Optional[int] = 128,
        disable_ngram_loss: Optional[bool] = False,
        eps: Optional[float] = 0.0,
        use_cache: Optional[bool] = True,
        pad_token_id: Optional[int] = 0,
        bos_token_id: Optional[int] = 1,
        eos_token_id: Optional[int] = 2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function

        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps

        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout

        self.use_cache = use_cache

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            add_cross_attention=add_cross_attention,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )

    @property
    def num_hidden_layers(self) -> int:
        return self.num_encoder_layers + self.num_decoder_layers

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError(
            "This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
            " `num_decoder_layers`."
        )
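# Illustrative usage (editorial addition): the config behaves like any other
# PretrainedConfig, with `num_hidden_layers` derived from the two depths above.
config = XLMProphetNetConfig(num_encoder_layers=6, num_decoder_layers=6)
print(config.num_hidden_layers)  # 12
# Assigning config.num_hidden_layers = 8 would raise NotImplementedError.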
| 703 |
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
logger = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
""",
)
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> np.ndarray:
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
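# Illustrative usage of the pipeline defined above (editorial addition); the
# checkpoint name is an assumption, and the leading spaces in `targets` follow
# RoBERTa's BPE conventions.
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="distilroberta-base")
print(fill_mask("Paris is the <mask> of France.", top_k=2))
print(fill_mask("Paris is the <mask> of France.", targets=[" capital", " heart"]))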
| 352 | 0 |
import math
from collections.abc import Iterator
from itertools import takewhile
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator() -> Iterator[int]:
    """Yields the primes in increasing order, indefinitely."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(n: int = 2_000_000) -> int:
    """Returns the sum of all the primes below n."""
    return sum(takewhile(lambda x: x < n, prime_generator()))
if __name__ == "__main__":
print(F"""{solution() = }""")
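    # Quick sanity checks for the helpers above (editorial additions; islice
    # keeps the infinite generator finite):
    from itertools import islice

    assert list(islice(prime_generator(), 5)) == [2, 3, 5, 7, 11]
    assert solution(10) == 17  # 2 + 3 + 5 + 7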
| 85 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house

        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 392 | 0 |
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id] | 707 |
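# Illustrative invocation (editorial addition): tools are normally driven by an
# agent, but the `PipelineTool.__call__` plumbing also allows direct use.
classifier = TextClassificationTool()
print(classifier("This is a super nice API!", labels=["positive", "negative"]))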
'''simple docstring'''
import bza
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)


class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path, **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path, output_path) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path, magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path, magic_number=b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: str, **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path: str):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: str, magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID, RAR5_ID

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class BzipaExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class LzaExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
"tar": TarExtractor,
"gzip": GzipExtractor,
"zip": ZipExtractor,
"xz": XzExtractor,
"rar": RarExtractor,
"zstd": ZstdExtractor,
"bz2": BzipaExtractor,
"7z": SevenZipExtractor, # <Added version="2.4.0"/>
"lz4": LzaExtractor, # <Added version="2.4.0"/>
}
    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: str, magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: str, return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path: str):  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format
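    # Illustrative (not part of the original module): a gzip archive starts with
    # the bytes b"\x1F\x8B", so for a path like "/tmp/data.json.gz" the loop
    # above would return the "gzip" key of `extractors`.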
    @classmethod
    def extract(
        cls, input_path: str, output_path: str, extractor_format: Optional[str] = None, extractor="deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path) | 39 | 0 |
from __future__ import annotations
def resistor_parallel(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors combined in parallel."""
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum
def resistor_series(resistors: list[float]) -> float:
    """Return the equivalent resistance of resistors combined in series."""
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
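    # Quick illustrative checks (not part of the original file): two 2-ohm
    # resistors in parallel give 1 ohm; 2 ohm and 3 ohm in series give 5 ohm.
    assert abs(resistor_parallel([2.0, 2.0]) - 1.0) < 1e-9
    assert abs(resistor_series([2.0, 3.0]) - 5.0) < 1e-9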
| 431 |
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized arithmetic expression using two stacks."""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
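    # Another illustrative check (not in the original file): inner parentheses
    # are reduced first, so dijkstras_two_stack_algorithm("((1 + 2) * (3 + 4))")
    # evaluates to 21.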
| 431 | 1 |
'''simple docstring'''
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
PRED = [
"Prosecutor: \"No videos were used in the crash investigation\" German papers say they saw a cell phone video of the"
" final seconds on board Flight 9525. The Germanwings co-pilot says he had a \"previous episode of severe"
" depression\" German airline confirms it knew of Andreas Lubitz's depression years before he took control.",
"The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal"
" accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC's"
" founding Rome Statute in January. Israel and the United States opposed the Palestinians' efforts to join the"
" body.",
"Amnesty International releases its annual report on the death penalty. The report catalogs the use of"
" state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the"
" world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital"
" punishment.",
]
TGT = [
"Marseille prosecutor says \"so far no videos were used in the crash investigation\" despite media reports ."
" Journalists at Bild and Paris Match are \"very confident\" the video clip is real, an editor says . Andreas Lubitz"
" had informed his Lufthansa training school of an episode of severe depression, airline says .",
"Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June ."
" Israel and the United States opposed the move, which could open the door to war crimes investigations against"
" Israelis .",
"Amnesty's annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to"
" death . Organization claims that governments around the world are using the threat of terrorism to advance"
" executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death"
" sentences up by 28% .",
]
def test_disaggregated_scores_are_deterministic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )
def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep
def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)
def test_pegasus_newline():
    pred = [
        '" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '
    ]
    tgt = [
        ' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'
    ]
    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score
def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_no_aggregation = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_no_aggregation, defaultdict)
| 701 |
'''simple docstring'''
import inspect
import os
import unittest
from dataclasses import dataclass
import torch
from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs
from accelerate.state import AcceleratorState
from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu
from accelerate.utils import KwargsHandler
@dataclass
class MockClass(KwargsHandler):
    a: int = 0
    b: bool = False
    c: float = 3.0


class KwargsHandlerTester(unittest.TestCase):
    def test_kwargs_handler(self):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        self.assertDictEqual(MockClass().to_kwargs(), {})
        self.assertDictEqual(MockClass(a=2).to_kwargs(), {"a": 2})
        self.assertDictEqual(MockClass(a=2, b=True).to_kwargs(), {"a": 2, "b": True})
        self.assertDictEqual(MockClass(a=2, c=2.25).to_kwargs(), {"a": 2, "c": 2.25})
    @require_cuda
    def test_grad_scaler_kwargs(self):
        # Check that custom GradScaler kwargs are applied while other values stay at their defaults.
        scaler_handler = GradScalerKwargs(init_scale=1024, growth_factor=2)
        AcceleratorState._reset_state()
        accelerator = Accelerator(mixed_precision="fp16", kwargs_handlers=[scaler_handler])
        print(accelerator.use_fp16)
        scaler = accelerator.scaler

        # Check the kwargs have been applied
        self.assertEqual(scaler._init_scale, 1024.0)
        self.assertEqual(scaler._growth_factor, 2.0)

        # Check the other values are at the default
        self.assertEqual(scaler._backoff_factor, 0.5)
        self.assertEqual(scaler._growth_interval, 2000)
        self.assertEqual(scaler._enabled, True)
    @require_multi_gpu
    def test_ddp_kwargs(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    ddp_scaler = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_scaler])
    model = torch.nn.Linear(100, 200)
    model = accelerator.prepare(model)

    # Check the values changed in kwargs
    error_msg = ""
    observed_bucket_cap_mb = model.bucket_bytes_cap // (1024 * 1024)
    if observed_bucket_cap_mb != 15:
        error_msg += f"Kwargs badly passed, should have `15` but found {observed_bucket_cap_mb}.\n"
    if model.find_unused_parameters is not True:
        error_msg += f"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n"

    # Check the values of the defaults
    if model.dim != 0:
        error_msg += f"Default value not respected, should have `0` but found {model.dim}.\n"
    if model.broadcast_buffers is not True:
        error_msg += f"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n"
    if model.gradient_as_bucket_view is not False:
        error_msg += f"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n"

    # Raise error at the end to make sure we don't stop at the first failure.
    if len(error_msg) > 0:
        raise ValueError(error_msg)
| 543 | 0 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization. "
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization. "
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )
def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)
    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
if __name__ == "__main__":
main()
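    # Example invocation (script name and checkpoints are illustrative):
    #   python create_model.py --output_dir ./image-captioner \
    #       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
    #       --decoder_model_name_or_path gpt2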
| 178 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs
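        # Note (illustrative): the tf.string signature above lets the exported
        # SavedModel tokenize raw text at inference time, e.g.
        # model.serving(tf.constant(["hello world"])).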
@require_tf
@require_keras_nlp
class TFGPT2TokenizerTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
"This is a straightforward English test sentence.",
"This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
"Now we're going to add some Chinese: 一 二 三 一二三",
"And some much more rare Chinese: 齉 堃 齉堃",
"Je vais aussi écrire en français pour tester les accents",
"Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))
    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))
    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))
    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
| 178 | 1 |
"""simple docstring"""
from pathlib import Path
import fire
def minify(src_dir: str, dest_dir: str, n: int):
    """Write the first `n` lines of each file in `src_dir` to `dest_dir`."""
    src_dir = Path(src_dir)
    dest_dir = Path(dest_dir)
    dest_dir.mkdir(exist_ok=True)
    for path in src_dir.iterdir():
        new = [x.rstrip() for x in list(path.open().readlines())][:n]
        dest_path = dest_dir.joinpath(path.name)
        print(dest_path)
        dest_path.open("w").write("\n".join(new))
if __name__ == "__main__":
fire.Fire(minify)
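    # Example CLI usage (illustrative): keep the first 100 lines of every file
    #   python minify.py SRC_DIR DEST_DIR 100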
| 715 |
"""simple docstring"""
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__snake_case = trt.Logger(trt.Logger.WARNING)
__snake_case = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__snake_case = logging.getLogger(__name__)
__snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='The number of processes to use for preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        'You are instantiating a new tokenizer from scratch. This is not supported by this script. '
        'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
    )
logger.info('Training/evaluation parameters %s', args)
args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = 'temp_engine/bert-fp32.engine'
if args.fp16:
    engine_name = 'temp_engine/bert-fp16.engine'
if args.int8:
    engine_name = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
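# Back-of-the-envelope check for the input bindings above (illustrative, using
# the default arguments): each int32 input of shape INPUT_SHAPE = (8, 384)
# occupies 8 * 384 * 4 = 12,288 bytes of device memory.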
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets['validation'].column_names

question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
F"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
eval_examples = raw_datasets['validation']

# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='Running tokenizer on validation dataset',
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffers
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
    # Evaluation
    logger.info('***** Running Evaluation *****')
    logger.info(f"  Num examples = {len(eval_dataset)}")
    logger.info(f"  Batch size = {args.per_device_eval_batch_size}")

    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info('  Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1000 / niter))
    logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1000))
    logger.info('Total Number of Inference = %d', niter)

    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f"Evaluation metrics: {eval_metric}") | 128 | 0 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs):
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
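
# A minimal usage sketch (configs chosen for illustration, not from the original file):
#
#   from transformers import BertConfig, GPT2Config
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(BertConfig(), GPT2Config())
#   assert config.decoder.is_decoder and config.decoder.add_cross_attention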
| 202 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'facebook/data2vec-vision-base-ft': (
'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'
),
}
class Data2VecVisionConfig(PretrainedConfig):
    model_type = "data2vec-vision"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        use_mask_token=False,
        use_absolute_position_embeddings=False,
        use_relative_position_bias=False,
        use_shared_relative_position_bias=False,
        layer_scale_init_value=0.1,
        drop_path_rate=0.1,
        use_mean_pooling=True,
        out_indices=[3, 5, 7, 11],
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.use_mask_token = use_mask_token
        self.use_absolute_position_embeddings = use_absolute_position_embeddings
        self.use_relative_position_bias = use_relative_position_bias
        self.use_shared_relative_position_bias = use_shared_relative_position_bias
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.use_mean_pooling = use_mean_pooling
        # decode head attributes (semantic segmentation)
        self.out_indices = out_indices
        self.pool_scales = pool_scales
        # auxiliary head attributes (semantic segmentation)
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class Data2VecVisionOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
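
# A minimal usage sketch (not from the original file):
#
#   config = Data2VecVisionConfig(image_size=384)
#   assert config.patch_size == 16 and config.num_hidden_layers == 12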
| 223 | 0 |
import requests
giphy_api_key = "YOUR API KEY"
def get_gifs(query: str, api_key: str = giphy_api_key) -> list:
    """Get a list of URLs of GIFs based on a given query."""
    formatted_query = "+".join(query.split())
    url = f"https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"
    gifs = requests.get(url).json()["data"]
    return [gif["url"] for gif in gifs]
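
# Illustrative note (not from the original file): each entry of "data" is a GIF
# object whose "url" field is the Giphy page URL; the raw media URLs, if needed,
# live under gif["images"].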
if __name__ == "__main__":
print('''\n'''.join(get_gifs('''space ship''')))
| 717 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPT2Tokenizer, GPT2TokenizerFast
from transformers.models.gpt2.tokenization_gpt2 import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPT2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPT2Tokenizer
    rust_tokenizer_class = GPT2TokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
"<|endoftext|>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2Tokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return GPT2TokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = GPT2Tokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass
    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
def snake_case ( self : Any ):
lowercase__ : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token="<pad>" )
# Simple input
lowercase__ : Optional[int] = "This is a simple input"
lowercase__ : List[str] = ["This is a simple input looooooooong", "This is a simple input"]
lowercase__ : List[Any] = ("This is a simple input", "This is a pair")
lowercase__ : Optional[Any] = [
("This is a simple input loooooong", "This is a simple input"),
("This is a simple pair loooooong", "This is a simple pair"),
]
lowercase__ : Any = tokenizer.pad_token_id
lowercase__ : Dict = tokenizer(SCREAMING_SNAKE_CASE , padding="max_length" , max_length=30 , return_tensors="np" )
lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" )
lowercase__ : List[str] = tokenizer(*SCREAMING_SNAKE_CASE , padding="max_length" , max_length=60 , return_tensors="np" )
lowercase__ : List[str] = tokenizer(SCREAMING_SNAKE_CASE , padding=SCREAMING_SNAKE_CASE , truncate=SCREAMING_SNAKE_CASE , return_tensors="np" )
# s
# test single string max_length padding
self.assertEqual(out_s["input_ids"].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s["input_ids"] )
self.assertTrue(0 in out_s["attention_mask"] )
# s2
# test automatic padding
self.assertEqual(out_sa["input_ids"].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa["input_ids"][0] )
self.assertFalse(0 in out_sa["attention_mask"][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa["input_ids"][1] )
self.assertTrue(0 in out_sa["attention_mask"][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p["input_ids"].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p["input_ids"] )
self.assertTrue(0 in out_p["attention_mask"] )
# p2
# test automatic padding pair
self.assertEqual(out_pa["input_ids"].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa["input_ids"][0] )
self.assertFalse(0 in out_pa["attention_mask"][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa["input_ids"][1] )
self.assertTrue(0 in out_pa["attention_mask"][1] )
def snake_case ( self : str ):
lowercase__ : List[str] = "$$$"
lowercase__ : Dict = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[int] = "This is a simple input"
lowercase__ : Dict = ["This is a simple input 1", "This is a simple input 2"]
lowercase__ : Optional[int] = tokenizer.bos_token_id
lowercase__ : List[Any] = tokenizer(SCREAMING_SNAKE_CASE )
lowercase__ : int = tokenizer(SCREAMING_SNAKE_CASE )
self.assertEqual(out_s.input_ids[0] , SCREAMING_SNAKE_CASE )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
lowercase__ : List[Any] = tokenizer.decode(out_s.input_ids )
lowercase__ : List[str] = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , SCREAMING_SNAKE_CASE )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def snake_case ( self : Optional[int] ):
pass
def snake_case ( self : Tuple ):
# TODO: change to self.get_tokenizers() when the fast version is implemented
lowercase__ : int = [self.get_tokenizer(do_lower_case=SCREAMING_SNAKE_CASE , add_bos_token=SCREAMING_SNAKE_CASE )]
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
lowercase__ : str = "Encode this."
lowercase__ : List[Any] = "This one too please."
lowercase__ : Dict = tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
encoded_sequence += tokenizer.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE )
lowercase__ : Dict = tokenizer.encode_plus(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE , return_special_tokens_mask=SCREAMING_SNAKE_CASE , )
lowercase__ : Tuple = encoded_sequence_dict["input_ids"]
lowercase__ : int = encoded_sequence_dict["special_tokens_mask"]
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , len(SCREAMING_SNAKE_CASE ) )
lowercase__ : List[str] = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(SCREAMING_SNAKE_CASE )
]
lowercase__ : Any = [x for x in filtered_sequence if x is not None]
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
@require_tokenizers
class snake_case__(unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : Union[str, Any] ):
# More context:
        # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = "A photo of a cat"
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("test_opt" )
lowercase__ : int = AutoTokenizer.from_pretrained("./test_opt" )
lowercase__ : Dict = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
def snake_case ( self : Union[str, Any] ):
lowercase__ : Any = AutoTokenizer.from_pretrained("facebook/opt-350m" , use_slow=SCREAMING_SNAKE_CASE )
lowercase__ : int = "A photo of a cat"
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
# Same as above
self.assertEqual(SCREAMING_SNAKE_CASE , [2, 250, 1_345, 9, 10, 4_758] )
@unittest.skip("This test is failing because of a bug in the fast tokenizer" )
def snake_case ( self : Tuple ):
lowercase__ : str = AutoTokenizer.from_pretrained("facebook/opt-350m" , from_slow=SCREAMING_SNAKE_CASE )
lowercase__ : Optional[Any] = "bos"
lowercase__ : List[Any] = tokenizer.get_vocab()["bos"]
lowercase__ : Optional[Any] = "A photo of a cat"
lowercase__ : Union[str, Any] = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
# We changed the bos token
self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
tokenizer.save_pretrained("./tok" )
lowercase__ : Any = AutoTokenizer.from_pretrained("./tok" )
self.assertTrue(tokenizer.is_fast )
lowercase__ : Tuple = tokenizer.encode(
SCREAMING_SNAKE_CASE , )
self.assertEqual(SCREAMING_SNAKE_CASE , [31_957, 250, 1_345, 9, 10, 4_758] )
| 81 | 0 |
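# A minimal, self-contained sketch of the padding behaviour asserted in the
# tests above. GPT-2 ships without a pad token, so one common workaround is to
# reuse the EOS token (assumes Hugging Face Hub access to download "gpt2").
from transformers import GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
tokenizer.pad_token = tokenizer.eos_token
batch = tokenizer(["This is a simple input looooooooong", "This is a simple input"],
                  padding=True, return_tensors="np")
# The shorter sequence is right-padded, so its attention mask contains zeros.
assert 0 in batch["attention_mask"][1]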
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class A__ ( DiffusionPipeline ):
    """simple docstring"""
    unet : UNet2DModel
    scheduler : KarrasVeScheduler
    def __init__( self , unet : UNet2DModel , scheduler : KarrasVeScheduler ) -> None:
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , batch_size : int = 1 , num_inference_steps : int = 50 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , **kwargs , ) -> Union[Tuple, ImagePipelineOutput]:
        """simple docstring"""
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)
        model = self.unet
        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape , generator=generator , device=self.device ) * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0
            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat , sigma_hat = self.scheduler.add_noise_to_input(sample , sigma , generator=generator )
            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2 , sigma_hat / 2 ).sample
            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output , sigma_hat , sigma_prev , sample_hat )
            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2 , sigma_prev / 2 ).sample
                step_output = self.scheduler.step_correct(
                    model_output , sigma_hat , sigma_prev , sample_hat , step_output.prev_sample , step_output["derivative"] , )
            sample = step_output.prev_sample
        sample = (sample / 2 + 0.5).clamp(0 , 1 )
        image = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
 | 494 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(dirname(__file__))), 'src'))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='ignore', category=FutureWarning)
def pytest_addoption(parser):
    from transformers.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser)
def pytest_terminal_summary(terminalreporter):
    from transformers.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption("--make-reports" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports )
 | 494 | 1 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class lowerCamelCase__ ( __lowercase , unittest.TestCase):
'''simple docstring'''
_A = MvpTokenizer
_A = MvpTokenizerFast
_A = True
_A = filter_roberta_detectors
def _lowerCamelCase ( self :List[str] ) -> Tuple:
super().setUp()
__UpperCamelCase : Optional[Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
__UpperCamelCase : Dict = dict(zip(a , range(len(a ) ) ) )
__UpperCamelCase : Any = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
__UpperCamelCase : Dict = {"unk_token": "<unk>"}
__UpperCamelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
__UpperCamelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(a ) )
def _lowerCamelCase ( self :Union[str, Any] , **a :str ) -> Any:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **a )
def _lowerCamelCase ( self :List[str] , **a :Dict ) -> int:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **a )
def _lowerCamelCase ( self :Dict , a :List[Any] ) -> str:
return "lower newer", "lower newer"
@cached_property
def _lowerCamelCase ( self :Any ) -> Union[str, Any]:
return MvpTokenizer.from_pretrained("RUCAIBox/mvp" )
@cached_property
def _lowerCamelCase ( self :int ) -> Optional[Any]:
return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp" )
@require_torch
def _lowerCamelCase ( self :Union[str, Any] ) -> Union[str, Any]:
__UpperCamelCase : List[str] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
__UpperCamelCase : Optional[int] = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCamelCase : Dict = tokenizer(a , max_length=len(a ) , padding=a , return_tensors="pt" )
self.assertIsInstance(a , a )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
__UpperCamelCase : str = batch.input_ids.tolist()[0]
self.assertListEqual(a , a )
# Test that special tokens are reset
@require_torch
def _lowerCamelCase ( self :List[Any] ) -> Optional[Any]:
__UpperCamelCase : List[Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCamelCase : Optional[Any] = tokenizer(a , padding=a , return_tensors="pt" )
# check if input_ids are returned and no labels
self.assertIn("input_ids" , a )
self.assertIn("attention_mask" , a )
self.assertNotIn("labels" , a )
self.assertNotIn("decoder_attention_mask" , a )
@require_torch
def _lowerCamelCase ( self :Any ) -> Optional[Any]:
__UpperCamelCase : List[str] = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCamelCase : List[Any] = tokenizer(text_target=a , max_length=3_2 , padding="max_length" , return_tensors="pt" )
self.assertEqual(3_2 , targets["input_ids"].shape[1] )
@require_torch
def _lowerCamelCase ( self :Optional[int] ) -> List[str]:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCamelCase : int = tokenizer(
["I am a small frog" * 1_0_2_4, "I am a small frog"] , padding=a , truncation=a , return_tensors="pt" )
self.assertIsInstance(a , a )
self.assertEqual(batch.input_ids.shape , (2, 1_0_2_4) )
@require_torch
def _lowerCamelCase ( self :Optional[int] ) -> Optional[int]:
__UpperCamelCase : Any = ["A long paragraph for summarization."]
__UpperCamelCase : Any = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
__UpperCamelCase : str = tokenizer(a , text_target=a , return_tensors="pt" )
__UpperCamelCase : Tuple = inputs["input_ids"]
__UpperCamelCase : Optional[int] = inputs["labels"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def _lowerCamelCase ( self :Optional[int] ) -> Dict:
pass
def _lowerCamelCase ( self :str ) -> Optional[int]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
__UpperCamelCase : Dict = self.rust_tokenizer_class.from_pretrained(a , **a )
__UpperCamelCase : Any = self.tokenizer_class.from_pretrained(a , **a )
__UpperCamelCase : List[Any] = "A, <mask> AllenNLP sentence."
__UpperCamelCase : str = tokenizer_r.encode_plus(a , add_special_tokens=a , return_token_type_ids=a )
__UpperCamelCase : Tuple = tokenizer_p.encode_plus(a , add_special_tokens=a , return_token_type_ids=a )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
__UpperCamelCase : Optional[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
__UpperCamelCase : List[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
a , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
a , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
| 700 |
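# Usage sketch of the seq2seq encoding path exercised by the tests above
# (assumes Hub access to "RUCAIBox/mvp"; return_tensors="pt" requires torch).
from transformers import MvpTokenizer

tokenizer = MvpTokenizer.from_pretrained("RUCAIBox/mvp")
inputs = tokenizer("A long paragraph for summarization.",
                   text_target="Summary of the text.", return_tensors="pt")
# Both encoder inputs and labels are wrapped in <s> ... </s>, as asserted above.
assert inputs["input_ids"][0, 0] == tokenizer.bos_token_id
assert inputs["labels"][0, -1] == tokenizer.eos_token_id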
from __future__ import annotations
import math
from collections import Counter
from string import ascii_lowercase
def calculate_prob(text: str) -> None:
    '''simple docstring'''
    single_char_strings , two_char_strings = analyze_text(text)
    my_alphas = list(" " + ascii_lowercase)
    # what is our total sum of probabilities.
    all_sum = sum(single_char_strings.values())
    # one length string
    my_fir_sum = 0
    # for each alpha we go in our dict and if it is in it we calculate entropy
    for ch in my_alphas:
        if ch in single_char_strings:
            my_str = single_char_strings[ch]
            prob = my_str / all_sum
            my_fir_sum += prob * math.log2(prob) # entropy formula.
    # print entropy
    print(F'{round(-1 * my_fir_sum):.1f}')
    # two len string
    all_sum = sum(two_char_strings.values())
    my_sec_sum = 0
    # for each alpha (two in size) calculate entropy.
    for cha in my_alphas:
        for chb in my_alphas:
            sequence = cha + chb
            if sequence in two_char_strings:
                my_str = two_char_strings[sequence]
                prob = int(my_str) / all_sum
                my_sec_sum += prob * math.log2(prob)
    # print second entropy
    print(F'{round(-1 * my_sec_sum):.1f}')
    # print the difference between them
    print(F'{round((-1 * my_sec_sum) - (-1 * my_fir_sum)):.1f}')
def analyze_text(text: str) -> tuple[dict, dict]:
    '''simple docstring'''
    single_char_strings = Counter() # type: ignore
    two_char_strings = Counter() # type: ignore
    single_char_strings[text[-1]] += 1
    # first case when we have space at start.
    two_char_strings[" " + text[0]] += 1
    for i in range(0 , len(text) - 1):
        single_char_strings[text[i]] += 1
        two_char_strings[text[i : i + 2]] += 1
    return single_char_strings, two_char_strings
def main() -> None:
    '''simple docstring'''
    import doctest
    doctest.testmod()
    # text = (
    #     "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark "
    #     "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest "
    #     "jointure saw horrible. He private he on be imagine suppose. Fertile "
    #     "beloved evident through no service elderly is. Blind there if every no so "
    #     "at. Own neglected you preferred way sincerity delivered his attempted. To "
    #     "of message cottage windows do besides against uncivil. Delightful "
    #     "unreserved impossible few estimating men favourable see entreaties. She "
    #     "propriety immediate was improving. He or entrance humoured likewise "
    #     "moderate. Much nor game son say feel. Fat make met can must form into "
    #     "gate. Me we offending prevailed discovery. "
    # )
    # calculate_prob(text)
if __name__ == "__main__":
    main()
 | 94 | 0 |
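# Quick demonstration of the routine above: prints first-order entropy,
# second-order entropy, and their difference for an all-lowercase sample string.
calculate_prob("the quick brown fox jumps over the lazy dog")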
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True )
class lowerCAmelCase_ ( TaskTemplate ):
    """simple docstring"""
    task: str = field(default="""image-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
    input_schema: ClassVar[Features] = Features({"""image""": Image()} )
    label_schema: ClassVar[Features] = Features({"""labels""": ClassLabel} )
    image_column: str = "image"
    label_column: str = "labels"
    def align_with_features( self , features ):
        if self.label_column not in features:
            raise ValueError(f"""Column {self.label_column} is not present in features.""" )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(f"""Column {self.label_column} is not a ClassLabel.""" )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template
    @property
    def column_mapping( self ) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
 | 45 |
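# Hedged usage sketch of the task-template pattern above, written against the
# public datasets.tasks API (available in datasets < 3.0) rather than the
# local class.
from datasets import ClassLabel, Features, Image
from datasets.tasks import ImageClassification

features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
task = ImageClassification(image_column="image", label_column="labels").align_with_features(features)
print(task.column_mapping)  # {'image': 'image', 'labels': 'labels'}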
def binomial_coefficient( n , r ):
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=10, r=5))
| 590 | 0 |
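# Sanity check for the Pascal's-rule DP above against the standard library
# (math.comb requires Python 3.8+).
import math

assert binomial_coefficient(10, 5) == math.comb(10, 5) == 252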
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A_ :
def __init__( self : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any]=13 , __SCREAMING_SNAKE_CASE : List[Any]=32 , __SCREAMING_SNAKE_CASE : Any=3 , __SCREAMING_SNAKE_CASE : List[str]=4 , __SCREAMING_SNAKE_CASE : Dict=[10, 20, 30, 40] , __SCREAMING_SNAKE_CASE : Any=[2, 2, 3, 2] , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : int=37 , __SCREAMING_SNAKE_CASE : Dict="gelu" , __SCREAMING_SNAKE_CASE : Any=10 , __SCREAMING_SNAKE_CASE : int=0.02 , __SCREAMING_SNAKE_CASE : List[Any]=["stage2", "stage3", "stage4"] , __SCREAMING_SNAKE_CASE : Optional[Any]=[2, 3, 4] , __SCREAMING_SNAKE_CASE : List[Any]=None , ):
__a = parent
__a = batch_size
__a = image_size
__a = num_channels
__a = num_stages
__a = hidden_sizes
__a = depths
__a = is_training
__a = use_labels
__a = intermediate_size
__a = hidden_act
__a = num_labels
__a = initializer_range
__a = out_features
__a = out_indices
__a = scope
def _UpperCAmelCase ( self : Union[str, Any] ):
__a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a = None
if self.use_labels:
__a = ids_tensor([self.batch_size] , self.num_labels )
__a = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self : int ):
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _UpperCAmelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any ):
__a = ConvNextVaModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__a = model(__SCREAMING_SNAKE_CASE )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _UpperCAmelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple ):
__a = ConvNextVaForImageClassification(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__a = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Dict ):
__a = ConvNextVaBackbone(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__a = model(__SCREAMING_SNAKE_CASE )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__a = None
__a = ConvNextVaBackbone(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
__a = model(__SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _UpperCAmelCase ( self : Tuple ):
__a = self.prepare_config_and_inputs()
__a , __a , __a = config_and_inputs
__a = {"pixel_values": pixel_values}
return config, inputs_dict
def _UpperCAmelCase ( self : List[str] ):
__a = self.prepare_config_and_inputs()
__a , __a , __a = config_and_inputs
__a = {"pixel_values": pixel_values, "labels": labels}
return config, inputs_dict
@require_torch
class A_ ( a_ , a_ , unittest.TestCase ):
_SCREAMING_SNAKE_CASE = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE = (
{"""feature-extraction""": ConvNextVaModel, """image-classification""": ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def _UpperCAmelCase ( self : Union[str, Any] ):
__a = ConvNextVaModelTester(self )
__a = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def _UpperCAmelCase ( self : Any ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self : str ):
return
@unittest.skip(reason="ConvNextV2 does not use inputs_embeds" )
def _UpperCAmelCase ( self : Any ):
pass
@unittest.skip(reason="ConvNextV2 does not support input and output embeddings" )
def _UpperCAmelCase ( self : Optional[int] ):
pass
@unittest.skip(reason="ConvNextV2 does not use feedforward chunking" )
def _UpperCAmelCase ( self : Any ):
pass
def _UpperCAmelCase ( self : Union[str, Any] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__a , __a = self.model_tester.prepare_config_and_inputs_with_labels()
__a = True
if model_class.__name__ in [
*get_values(__SCREAMING_SNAKE_CASE ),
*get_values(__SCREAMING_SNAKE_CASE ),
]:
continue
__a = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.train()
__a = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
__a = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def _UpperCAmelCase ( self : str ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
__a , __a = self.model_tester.prepare_config_and_inputs_with_labels()
__a = False
__a = True
if (
model_class.__name__
in [*get_values(__SCREAMING_SNAKE_CASE ), *get_values(__SCREAMING_SNAKE_CASE )]
or not model_class.supports_gradient_checkpointing
):
continue
__a = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.gradient_checkpointing_enable()
model.train()
__a = self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , return_labels=__SCREAMING_SNAKE_CASE )
__a = model(**__SCREAMING_SNAKE_CASE ).loss
loss.backward()
def _UpperCAmelCase ( self : Dict ):
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = model_class(__SCREAMING_SNAKE_CASE )
__a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a = [*signature.parameters.keys()]
__a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Optional[Any] ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Tuple ):
def check_hidden_states_output(__SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int ):
__a = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__a = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
__a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__a = self.model_tester.num_stages
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__a , __a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a = True
check_hidden_states_output(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : List[Any] ):
__a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def _UpperCAmelCase ( self : Union[str, Any] ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a = ConvNextVaModel.from_pretrained(__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def __A ( ):
"""simple docstring"""
__a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class A_ ( unittest.TestCase ):
@cached_property
def _UpperCAmelCase ( self : Dict ):
return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224" ) if is_vision_available() else None
@slow
def _UpperCAmelCase ( self : Optional[int] ):
__a = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224" ).to(__SCREAMING_SNAKE_CASE )
__a = self.default_image_processor
__a = prepare_img()
__a = preprocessor(images=__SCREAMING_SNAKE_CASE , return_tensors="pt" ).to(__SCREAMING_SNAKE_CASE )
# forward pass
with torch.no_grad():
__a = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
__a = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
__a = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(__SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) )
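# End-to-end inference sketch matching the integration test above, via the
# public transformers class names (the file above uses internal test aliases);
# assumes Hub access, and the image path is the COCO fixture used by the suite.
import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits
print(model.config.id2label[logits.argmax(-1).item()])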
 | 525 |
from ...configuration_utils import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """sijunhe/nezha-cn-base""": """https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json""",
}
class A_ ( PretrainedConfig ):
    pretrained_config_archive_map = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
    model_type = """nezha"""
    def __init__( self , vocab_size=21128 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , max_relative_position=64 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , classifier_dropout=0.1 , pad_token_id=0 , bos_token_id=2 , eos_token_id=3 , use_cache=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.max_relative_position = max_relative_position
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
 | 525 | 1 |
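# Minimal usage sketch via the public transformers class this configuration
# mirrors; configs are built locally, so no download is needed.
from transformers import NezhaConfig

config = NezhaConfig(hidden_size=768, num_hidden_layers=12)
print(config.model_type, config.max_relative_position)  # nezha 64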
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(lowerCAmelCase__ ) , """Tatoeba directory does not exist.""" )
class snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _lowercase ( self : str ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = tempfile.mkdtemp()
return TatoebaConverter(save_dir=lowerCAmelCase_ )
@slow
def _lowercase ( self : int ) -> Dict:
"""simple docstring"""
self.resolver.convert_models(['''heb-eng'''] )
@slow
def _lowercase ( self : int ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = self.resolver.write_model_card('''opus-mt-he-en''' , dry_run=lowerCAmelCase_ )
assert mmeta["long_pair"] == "heb-eng"
| 393 |
'''simple docstring'''
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class DiffieHellman:
    '''simple docstring'''
    def __init__(self , group = 14 ) -> None:
        """simple docstring"""
        if group not in primes:
            raise ValueError('''Unsupported Group''' )
        self.prime = primes[group]['''prime''']
        self.generator = primes[group]['''generator''']
        self.__private_key = int(hexlify(urandom(32 ) ) , base=16 )
    def get_private_key(self ) -> str:
        """simple docstring"""
        return hex(self.__private_key )[2:]
    def generate_public_key(self ) -> str:
        """simple docstring"""
        public_key = pow(self.generator , self.__private_key , self.prime )
        return hex(public_key )[2:]
    def is_valid_public_key(self , key ) -> bool:
        """simple docstring"""
        return (
            2 <= key <= self.prime - 2
            and pow(key , (self.prime - 1) // 2 , self.prime ) == 1
        )
    def generate_shared_key(self , other_key_str ) -> str:
        """simple docstring"""
        other_key = int(other_key_str , base=16 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError('''Invalid public key''' )
        shared_key = pow(other_key , self.__private_key , self.prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
    @staticmethod
    def is_valid_public_key_static(remote_public_key_str , prime ) -> bool:
        """simple docstring"""
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str , (prime - 1) // 2 , prime ) == 1
        )
    @staticmethod
    def generate_shared_key_static(local_private_key_str , remote_public_key_str , group = 14 ) -> str:
        """simple docstring"""
        local_private_key = int(local_private_key_str , base=16 )
        remote_public_key = int(remote_public_key_str , base=16 )
        prime = primes[group]['''prime''']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key , prime ):
            raise ValueError('''Invalid public key''' )
        shared_key = pow(remote_public_key , local_private_key , prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
| 11 | 0 |
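# Round-trip usage sketch: both parties derive the same shared secret from
# each other's public keys (uses the default 2048-bit MODP group).
alice = DiffieHellman()
bob = DiffieHellman()
shared_ab = alice.generate_shared_key(bob.generate_public_key())
shared_ba = bob.generate_shared_key(alice.generate_public_key())
assert shared_ab == shared_ba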
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {
"""tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
"""https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
),
}
class lowerCAmelCase ( PretrainedConfig ):
    model_type = """gptsan-japanese"""
    keys_to_ignore_at_inference = [
        """past_key_values""",
    ]
    attribute_map = {
        """hidden_size""": """d_model""",
        """num_attention_heads""": """num_heads""",
        """num_hidden_layers""": """num_layers""",
    }
    def __init__( self , vocab_size=36000 , max_position_embeddings=1280 , d_model=1024 , d_ff=8192 , d_ext=4096 , d_spout=128 , num_switch_layers=10 , num_ext_layers=0 , num_heads=16 , num_experts=16 , expert_capacity=128 , dropout_rate=0.0 , layer_norm_epsilon=1E-5 , router_bias=False , router_jitter_noise=0.0 , router_dtype="float32" , router_ignore_padding_tokens=False , output_hidden_states=False , output_attentions=False , initializer_factor=0.002 , output_router_logits=False , use_cache=True , separator_token_id=35998 , pad_token_id=35995 , eos_token_id=35999 , **kwargs , ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.d_ff = d_ff
        self.d_ext = d_ext
        self.d_spout = d_spout
        self.num_switch_layers = num_switch_layers
        self.num_ext_layers = num_ext_layers
        self.num_layers = num_switch_layers + num_ext_layers
        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.output_hidden_states = output_hidden_states
        self.output_attentions = output_attentions
        self.initializer_factor = initializer_factor
        self.output_router_logits = output_router_logits
        self.use_cache = use_cache
        super().__init__(
            separator_token_id=separator_token_id , pad_token_id=pad_token_id , eos_token_id=eos_token_id , **kwargs , )
 | 703 |
UpperCAmelCase_ = {
'''Pillow''': '''Pillow<10.0.0''',
'''accelerate''': '''accelerate>=0.20.3''',
'''av''': '''av==9.2.0''',
'''beautifulsoup4''': '''beautifulsoup4''',
'''black''': '''black~=23.1''',
'''codecarbon''': '''codecarbon==1.2.0''',
'''cookiecutter''': '''cookiecutter==1.7.3''',
'''dataclasses''': '''dataclasses''',
'''datasets''': '''datasets!=2.5.0''',
'''decord''': '''decord==0.6.0''',
'''deepspeed''': '''deepspeed>=0.9.3''',
'''diffusers''': '''diffusers''',
'''dill''': '''dill<0.3.5''',
'''evaluate''': '''evaluate>=0.2.0''',
'''fairscale''': '''fairscale>0.3''',
'''faiss-cpu''': '''faiss-cpu''',
'''fastapi''': '''fastapi''',
'''filelock''': '''filelock''',
'''flax''': '''flax>=0.4.1,<=0.7.0''',
'''ftfy''': '''ftfy''',
'''fugashi''': '''fugashi>=1.0''',
'''GitPython''': '''GitPython<3.1.19''',
'''hf-doc-builder''': '''hf-doc-builder>=0.3.0''',
'''huggingface-hub''': '''huggingface-hub>=0.14.1,<1.0''',
'''importlib_metadata''': '''importlib_metadata''',
'''ipadic''': '''ipadic>=1.0.0,<2.0''',
'''isort''': '''isort>=5.5.4''',
'''jax''': '''jax>=0.2.8,!=0.3.2,<=0.4.13''',
'''jaxlib''': '''jaxlib>=0.1.65,<=0.4.13''',
'''jieba''': '''jieba''',
'''kenlm''': '''kenlm''',
'''keras-nlp''': '''keras-nlp>=0.3.1''',
'''librosa''': '''librosa''',
'''nltk''': '''nltk''',
'''natten''': '''natten>=0.14.6''',
'''numpy''': '''numpy>=1.17''',
'''onnxconverter-common''': '''onnxconverter-common''',
'''onnxruntime-tools''': '''onnxruntime-tools>=1.4.2''',
'''onnxruntime''': '''onnxruntime>=1.4.0''',
'''opencv-python''': '''opencv-python''',
'''optuna''': '''optuna''',
'''optax''': '''optax>=0.0.8,<=0.1.4''',
'''packaging''': '''packaging>=20.0''',
'''parameterized''': '''parameterized''',
'''phonemizer''': '''phonemizer''',
'''protobuf''': '''protobuf''',
'''psutil''': '''psutil''',
'''pyyaml''': '''pyyaml>=5.1''',
'''pydantic''': '''pydantic<2''',
'''pytest''': '''pytest>=7.2.0''',
'''pytest-timeout''': '''pytest-timeout''',
'''pytest-xdist''': '''pytest-xdist''',
'''python''': '''python>=3.8.0''',
'''ray[tune]''': '''ray[tune]''',
'''regex''': '''regex!=2019.12.17''',
'''requests''': '''requests''',
'''rhoknp''': '''rhoknp>=1.1.0,<1.3.1''',
'''rjieba''': '''rjieba''',
'''rouge-score''': '''rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1''',
'''ruff''': '''ruff>=0.0.241,<=0.0.259''',
'''sacrebleu''': '''sacrebleu>=1.4.12,<2.0.0''',
'''sacremoses''': '''sacremoses''',
'''safetensors''': '''safetensors>=0.3.1''',
'''sagemaker''': '''sagemaker>=2.31.0''',
'''scikit-learn''': '''scikit-learn''',
'''sentencepiece''': '''sentencepiece>=0.1.91,!=0.1.92''',
'''sigopt''': '''sigopt''',
'''starlette''': '''starlette''',
'''sudachipy''': '''sudachipy>=0.6.6''',
'''sudachidict_core''': '''sudachidict_core>=20220729''',
'''tensorflow-cpu''': '''tensorflow-cpu>=2.6,<2.14''',
'''tensorflow''': '''tensorflow>=2.6,<2.14''',
'''tensorflow-text''': '''tensorflow-text<2.14''',
'''tf2onnx''': '''tf2onnx''',
'''timeout-decorator''': '''timeout-decorator''',
'''timm''': '''timm''',
'''tokenizers''': '''tokenizers>=0.11.1,!=0.11.3,<0.14''',
'''torch''': '''torch>=1.9,!=1.12.0''',
'''torchaudio''': '''torchaudio''',
'''torchvision''': '''torchvision''',
'''pyctcdecode''': '''pyctcdecode>=0.4.0''',
'''tqdm''': '''tqdm>=4.27''',
'''unidic''': '''unidic>=1.0.2''',
'''unidic_lite''': '''unidic_lite>=1.0.7''',
'''urllib3''': '''urllib3<2.0.0''',
'''uvicorn''': '''uvicorn''',
}
 | 476 | 0 |
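# Sketch of how a pin table like the module-level mapping above is typically
# consumed: turning bare package names into full requirement strings (the
# helper name is illustrative, not from the source).
def deps_list(deps, *pkgs):
    return [deps[pkg] for pkg in pkgs]

print(deps_list(UpperCAmelCase_, "numpy", "torch"))  # ['numpy>=1.17', 'torch>=1.9,!=1.12.0']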
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def a__ ( _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : int ) -> List[Any]:
"""simple docstring"""
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] ) -> Tuple:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = tmp_path / "cache"
UpperCAmelCase_ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase_ : int = ParquetDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def a__ ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[int] ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase_ : Optional[Any] = tmp_path / "cache"
UpperCAmelCase_ : Any = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : List[str] = features.copy() if features else default_expected_features
UpperCAmelCase_ : Optional[Any] = (
Features({feature: Value(_SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase_ : Optional[int] = ParquetDatasetReader(_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def a__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Any ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : str = tmp_path / "cache"
UpperCAmelCase_ : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : Optional[Any] = ParquetDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , split=_SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def a__ ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Dict ) -> List[Any]:
"""simple docstring"""
if issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Optional[int] = parquet_path
elif issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ : Dict = [parquet_path]
UpperCAmelCase_ : str = tmp_path / "cache"
UpperCAmelCase_ : List[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : Union[str, Any] = ParquetDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
_check_parquet_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def a__ ( _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Optional[Any]=("train",) ) -> Optional[Any]:
"""simple docstring"""
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for split in splits:
UpperCAmelCase_ : str = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def a__ ( _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : str ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase_ : List[Any] = tmp_path / "cache"
UpperCAmelCase_ : List[str] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
UpperCAmelCase_ : Union[str, Any] = ParquetDatasetReader(
{"train": parquet_path} , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Optional[Any] ) -> Dict:
"""simple docstring"""
UpperCAmelCase_ : Any = tmp_path / "cache"
UpperCAmelCase_ : Optional[int] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : List[str] = features.copy() if features else default_expected_features
UpperCAmelCase_ : str = (
Features({feature: Value(_SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
UpperCAmelCase_ : Optional[int] = ParquetDatasetReader({"train": parquet_path} , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize("split" , [None, NamedSplit("train" ), "train", "test"] )
def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Any ) -> int:
"""simple docstring"""
if split:
UpperCAmelCase_ : Tuple = {split: parquet_path}
else:
UpperCAmelCase_ : int = "train"
UpperCAmelCase_ : int = {"train": parquet_path, "test": parquet_path}
UpperCAmelCase_ : List[Any] = tmp_path / "cache"
UpperCAmelCase_ : int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
UpperCAmelCase_ : Any = ParquetDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
_check_parquet_datasetdict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def test_parquet_write(dataset, tmp_path):
    """Writing a dataset to Parquet should produce a table identical to the in-memory one."""
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table
def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    """Image features must survive a write/read round trip, in both regular and streaming mode."""
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features
    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
"feature, expected" , [
(Features({"foo": Value("int32" )} ), None),
(Features({"image": Image(), "foo": Value("int32" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({"nested": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def test_get_writer_batch_size(feature, expected):
    """Row group size should be reduced for image/audio features and left unset otherwise."""
    assert get_writer_batch_size(feature) == expected
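

# A minimal round-trip sketch of the reader/writer exercised above; the file name
# and sample values are assumed and are not part of the original test module.
if __name__ == "__main__":
    _ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [0, 1], "col_3": [0.0, 1.0]})
    ParquetDatasetWriter(_ds, "example.parquet").write()
    _reloaded = ParquetDatasetReader("example.parquet", split="train").read()
    print(_reloaded.features)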
| 71 |
'''simple docstring'''
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxControlNetPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up memory after each test
        super().tearDown()
        gc.collect()

    def test_canny(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-canny", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params
        prompts = "bird"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
        canny_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        )
        processed_image = pipe.prepare_image_inputs([canny_image] * num_samples)
        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())
        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)
        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [0.167969, 0.116699, 0.081543, 0.154297, 0.132812, 0.108887, 0.169922, 0.169922, 0.205078]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_pose(self):
        controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
            "lllyasviel/sd-controlnet-openpose", from_pt=True, dtype=jnp.bfloat16
        )
        pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", controlnet=controlnet, from_pt=True, dtype=jnp.bfloat16
        )
        params["controlnet"] = controlnet_params
        prompts = "Chef in the kitchen"
        num_samples = jax.device_count()
        prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
        pose_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png"
        )
        processed_image = pipe.prepare_image_inputs([pose_image] * num_samples)
        rng = jax.random.PRNGKey(0)
        rng = jax.random.split(rng, jax.device_count())
        p_params = replicate(params)
        prompt_ids = shard(prompt_ids)
        processed_image = shard(processed_image)
        images = pipe(
            prompt_ids=prompt_ids,
            image=processed_image,
            params=p_params,
            prng_seed=rng,
            num_inference_steps=50,
            jit=True,
        ).images
        assert images.shape == (jax.device_count(), 1, 768, 512, 3)
        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]
        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array(
            [[0.271484, 0.261719, 0.275391, 0.277344, 0.279297, 0.291016, 0.294922, 0.302734, 0.302734]]
        )
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
| 685 | 0 |
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source, target):
    return (abs(source - target) / target) < 0.01
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir):
    args = _TestCommandArgs(dataset=dataset_loading_script_dir, all_configs=True, save_infos=True)
    test_command = TestCommand(*args)
    test_command.run()
    datasets_readme_path = os.path.join(dataset_loading_script_dir, "README.md")
    assert os.path.exists(datasets_readme_path)
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir)
    expected_dataset_infos = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) , splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_351_563,
'''num_examples''': 10_000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238_418,
'''num_examples''': 1_000,
},
] , download_size=3_940_680 , dataset_size=2_589_981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result, expected = getattr(dataset_infos["default"], key), getattr(expected_dataset_infos["default"], key)
        if key == "num_bytes":
            assert is_apercent_close(result, expected)
        elif key == "splits":
            assert list(result) == list(expected)
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
            assert result == expected
| 537 |
'''simple docstring'''
def interpolation_search(sorted_collection, item):
    """Search a sorted collection by interpolating the probe position."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        # avoid division by zero during interpolation
        if sorted_collection[left] == sorted_collection[right]:
            if sorted_collection[left] == item:
                return left
            else:
                return None
        point = left + ((item - sorted_collection[left]) * (right - left)) // (
            sorted_collection[right] - sorted_collection[left]
        )
        # out of range check
        if point < 0 or point >= len(sorted_collection):
            return None
        current_item = sorted_collection[point]
        if current_item == item:
            return point
        else:
            if point < left:
                right = left
                left = point
            elif point > right:
                left = right
                right = point
            else:
                if item < current_item:
                    right = point - 1
                else:
                    left = point + 1
    return None
def interpolation_search_by_recursion(sorted_collection, item, left, right):
    """Pure recursive counterpart of interpolation_search."""
    # avoid division by zero during interpolation
    if sorted_collection[left] == sorted_collection[right]:
        if sorted_collection[left] == item:
            return left
        else:
            return None
    point = left + ((item - sorted_collection[left]) * (right - left)) // (
        sorted_collection[right] - sorted_collection[left]
    )
    # out of range check
    if point < 0 or point >= len(sorted_collection):
        return None
    if sorted_collection[point] == item:
        return point
    elif point < left:
        return interpolation_search_by_recursion(sorted_collection, item, point, left)
    elif point > right:
        return interpolation_search_by_recursion(sorted_collection, item, right, left)
    else:
        if sorted_collection[point] > item:
            return interpolation_search_by_recursion(sorted_collection, item, left, point - 1)
        else:
            return interpolation_search_by_recursion(sorted_collection, item, point + 1, right)
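

# Small self-check for the recursive variant; the sample data below is purely
# illustrative and was not part of the original module.
def _recursive_search_demo():
    data = [10, 30, 40, 45, 50, 66, 77, 93]
    assert interpolation_search_by_recursion(data, 66, 0, len(data) - 1) == 5
    assert interpolation_search_by_recursion(data, 67, 0, len(data) - 1) is None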
def __assert_sorted(collection):
    if collection != sorted(collection):
        raise ValueError("Collection must be ascending sorted")
    return True
if __name__ == "__main__":
    import sys

    debug = 0
    collection = [10, 30, 40, 45, 50, 66, 77, 93]
    if debug == 1:
        try:
            __assert_sorted(collection)
        except ValueError:
            sys.exit("Sequence must be ascending sorted to apply interpolation search")
    target = 67
    result = interpolation_search(collection, target)
    if result is not None:
        print(f"{target} found at positions: {result}")
    else:
        print("Not found")
| 537 | 1 |
from __future__ import annotations
import time
import numpy as np
test_claim_vector = [8, 5, 9, 7]
test_allocated_res_table = [
    [2, 0, 1, 1],
    [0, 1, 2, 1],
    [4, 0, 0, 3],
    [0, 2, 1, 0],
    [1, 0, 3, 0],
]
test_maximum_claim_table = [
    [3, 2, 1, 4],
    [0, 2, 5, 2],
    [5, 1, 0, 5],
    [1, 5, 3, 0],
    [3, 0, 3, 3],
]
class BankersAlgorithm:
    def __init__(self, claim_vector, allocated_resources_table, maximum_claim_table):
        self.__claim_vector = claim_vector
        self.__allocated_resources_table = allocated_resources_table
        self.__maximum_claim_table = maximum_claim_table

    def __processes_resource_summation(self):
        # total units of each resource currently allocated across all processes
        return [
            sum(p_item[i] for p_item in self.__allocated_resources_table)
            for i in range(len(self.__allocated_resources_table[0]))
        ]

    def __available_resources(self):
        # free resources = claimed resources minus what is currently allocated
        return np.array(self.__claim_vector) - np.array(
            self.__processes_resource_summation()
        )

    def __need(self):
        # per-process outstanding need = maximum claim minus current allocation
        return [
            list(np.array(self.__maximum_claim_table[i]) - np.array(allocated_resource))
            for i, allocated_resource in enumerate(self.__allocated_resources_table)
        ]

    def __need_index_manager(self):
        # remember each need vector's original process index before the list shrinks
        return {self.__need().index(need): need for need in self.__need()}

    def main(self, **kwargs):
        need_list = self.__need()
        alloc_resources_table = self.__allocated_resources_table
        available_resources = self.__available_resources()
        need_index_manager = self.__need_index_manager()
        for kw, val in kwargs.items():
            if kw and val is True:
                self.__pretty_data()
                print("_" * 50 + "\n")
        while need_list:
            safe = False
            for each_need in need_list:
                execution = True
                for index, need in enumerate(each_need):
                    if need > available_resources[index]:
                        execution = False
                        break
                if execution:
                    safe = True
                    # get the original index of the process from ind_ctrl db
                    for original_need_index, need_clone in need_index_manager.items():
                        if each_need == need_clone:
                            process_number = original_need_index
                    print(f"Process {process_number + 1} is executing.")
                    # remove the process run from stack
                    need_list.remove(each_need)
                    # update available/freed resources stack
                    available_resources = np.array(available_resources) + np.array(
                        alloc_resources_table[process_number]
                    )
                    print(
                        "Updated available resource stack for processes: "
                        + " ".join([str(x) for x in available_resources])
                    )
                    break
            if safe:
                print("The process is in a safe state.\n")
            else:
                print("System in unsafe state. Aborting...\n")
                break

    def __pretty_data(self):
        print(" " * 9 + "Allocated Resource Table")
        for item in self.__allocated_resources_table:
            print(
                f"P{self.__allocated_resources_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(" " * 9 + "System Resource Table")
        for item in self.__maximum_claim_table:
            print(
                f"P{self.__maximum_claim_table.index(item) + 1}"
                + " ".join(f"{it:>8}" for it in item)
                + "\n"
            )
        print(
            "Current Usage by Active Processes: "
            + " ".join(str(x) for x in self.__claim_vector)
        )
        print(
            "Initial Available Resources: "
            + " ".join(str(x) for x in self.__available_resources())
        )
        time.sleep(1)
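

# Minimal usage sketch with the sample tables above. The keyword name `describe`
# is arbitrary: main() prints the pretty tables for any truthy keyword argument.
def _bankers_demo():
    BankersAlgorithm(
        test_claim_vector, test_allocated_res_table, test_maximum_claim_table
    ).main(describe=True)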
if __name__ == "__main__":
import doctest
doctest.testmod()
| 27 |
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    """Count parameters that require gradients."""
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    """Save the best model as measured by the validation metric."""
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    elif metric == "loss":
        exp = "{val_avg_loss:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f"seq2seq callbacks only support rouge2 and bleu, got {metric}. You can make your own by adding to this"
            " function."
        )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir,
        filename=exp,
        monitor=f"val_{metric}",
        mode="max",
        save_top_k=1,
        every_n_epochs=1,
    )
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f"val_{metric}",
        mode="min" if "loss" in metric else "max",
        patience=patience,
        verbose=True,
    )
class Seq2SeqLoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer, pl_module, type_path, save_generations=True):
        logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****")
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt"
            generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt"
        results_file.parent.mkdir(exist_ok=True)
        generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f"{key}: {val:.6f}\n"
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer, pl_module):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
| 27 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyInpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyInpaintPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim,
            transformerDimensions=self.text_embedder_hidden_size,
            hidden_size=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=1005,
        )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
    @property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )
        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :16] = 0  # zero out the region to inpaint (indices assumed; lost in this copy)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        print(f"image.shape {image.shape}")
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyInpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy"
        )
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0  # zero out the region to inpaint (indices assumed; lost in this copy)
        prompt = "a hat"
        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)
        pipeline = KandinskyInpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()
        output = pipeline(
            prompt,
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(image, expected_image)
| 279 |
import functools
def min_distance_up_bottom(word1: str, word2: str) -> int:
    """Levenshtein distance via top-down dynamic programming with memoization."""
    len_word1 = len(word1)
    len_word2 = len(word2)

    @functools.cache
    def min_distance(index1: int, index2: int) -> int:
        # if first word index is overflow - delete all from the second word
        if index1 >= len_word1:
            return len_word2 - index2
        # if second word index is overflow - delete all from the first word
        if index2 >= len_word2:
            return len_word1 - index1
        diff = int(word1[index1] != word2[index2])  # current letters not identical
        return min(
            1 + min_distance(index1 + 1, index2),
            1 + min_distance(index1, index2 + 1),
            diff + min_distance(index1 + 1, index2 + 1),
        )

    return min_distance(0, 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
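
# Quick sanity check in addition to the doctests; the sample words are purely
# illustrative and were not part of the original module.
if __name__ == "__main__":
    assert min_distance_up_bottom("intention", "execution") == 5
    assert min_distance_up_bottom("", "abc") == 3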
| 279 | 1 |
import argparse
import datetime
import io
import itertools
import json
import math
import os
import platform
import re
import shlex
import subprocess
import sys
from pathlib import Path
from statistics import fmean
import pandas as pd
import torch
from tqdm import tqdm
import transformers
nan = float("nan")
class Tee:
    """Tee print's output into a file while still writing to the original stdout."""

    def __init__(self, filename):
        self.stdout = sys.stdout
        self.file = open(filename, "a")

    def __getattr__(self, attr):
        return getattr(self.stdout, attr)

    def write(self, msg):
        self.stdout.write(msg)
        # strip tqdm codes
        self.file.write(re.sub(r"^.*\r", "", msg, 0, re.M))
def get_original_command(max_width=80, full_python_path=False):
    """Reconstruct the command line that launched this script, wrapped for readability."""
    cmd = []
    # deal with critical env vars
    env_keys = ["CUDA_VISIBLE_DEVICES"]
    for key in env_keys:
        val = os.environ.get(key, None)
        if val is not None:
            cmd.append(f"{key}={val}")
    # python executable (not always needed if the script is executable)
    python = sys.executable if full_python_path else sys.executable.split("/")[-1]
    cmd.append(python)
    # now the normal args
    cmd += list(map(shlex.quote, sys.argv))
    # split up into up to MAX_WIDTH lines with shell multi-line escapes
    lines = []
    current_line = ""
    while len(cmd) > 0:
        current_line += f"{cmd.pop(0)} "
        if len(cmd) == 0 or len(current_line) + len(cmd[0]) + 1 > max_width - 1:
            lines.append(current_line)
            current_line = ""
    return "\\\n".join(lines)
def get_base_command(args, output_dir):
    # unwrap multi-line input
    args.base_cmd = re.sub(r"[\\\n]+", " ", args.base_cmd)
    # remove --output_dir if any and set our own
    args.base_cmd = re.sub(r"--output_dir\s+[^\s]+", "", args.base_cmd)
    args.base_cmd += f" --output_dir {output_dir}"
    # ensure we have --overwrite_output_dir
    args.base_cmd = re.sub(r"--overwrite_output_dir\s+", "", args.base_cmd)
    args.base_cmd += " --overwrite_output_dir"
    return [sys.executable] + shlex.split(args.base_cmd)
def process_run_single(id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose):
    # Enable to debug everything but the run itself, to do it fast and see the progress.
    # This is useful for debugging the output formatting quickly - we can remove it later once
    # everybody is happy with the output
    if 0:
        import random
        from time import sleep

        sleep(0)
        return dict(
            {k: random.uniform(0, 100) for k in metric_keys},
            **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6666, 222.22222222])},
        )

    result = subprocess.run(cmd, capture_output=True, text=True)
    if verbose:
        print("STDOUT", result.stdout)
        print("STDERR", result.stderr)
    # save the streams
    prefix = variation.replace(" ", "-")
    with open(Path(output_dir) / f"log.{prefix}.stdout.txt", "w") as f:
        f.write(result.stdout)
    with open(Path(output_dir) / f"log.{prefix}.stderr.txt", "w") as f:
        f.write(result.stderr)
    if result.returncode != 0:
        if verbose:
            print("failed")
        return {target_metric_key: nan}
    with io.open(f"{output_dir}/all_results.json", "r", encoding="utf-8") as f:
        metrics = json.load(f)
    # filter out just the keys we want
    return {k: v for k, v in metrics.items() if k in metric_keys}
def process_run(
    id,
    cmd,
    variation_key,
    variation,
    longest_variation_len,
    target_metric_key,
    report_metric_keys,
    repeat_times,
    output_dir,
    verbose,
):
    results = []
    metrics = []
    preamble = f"{id}: {variation:<{longest_variation_len}}"
    outcome = f"{preamble}: "
    metric_keys = set(report_metric_keys + [target_metric_key])
    for i in tqdm(range(repeat_times), desc=preamble, leave=False):
        single_run_metrics = process_run_single(
            id, cmd, variation, output_dir, target_metric_key, metric_keys, verbose
        )
        result = single_run_metrics[target_metric_key]
        if not math.isnan(result):
            metrics.append(single_run_metrics)
            results.append(result)
            outcome += "✓"
        else:
            outcome += "✘"
    outcome = f"\33[2K\r{outcome}"
    if len(metrics) > 0:
        mean_metrics = {k: fmean([x[k] for x in metrics]) for k in metrics[0].keys()}
        mean_target = round(mean_metrics[target_metric_key], 2)
        results_str = f"{outcome} {mean_target}"
        if len(metrics) > 1:
            results_str += f" {tuple(round(x, 2) for x in results)}"
        print(results_str)
        mean_metrics[variation_key] = variation
        return mean_metrics
    else:
        print(outcome)
        return {variation_key: variation, target_metric_key: nan}
def get_versions():
    properties = torch.cuda.get_device_properties(torch.device("cuda"))
    return f"\nDatetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\nSoftware:\ntransformers: {transformers.__version__}\ntorch : {torch.__version__}\ncuda : {torch.version.cuda}\npython : {platform.python_version()}\n\nHardware:\n{torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB\n"
def process_results(results, target_metric_key, report_metric_keys, base_variation, output_dir):
    df = pd.DataFrame(results)
    variation_key = "variation"
    diff_key = "diff_%"
    sentinel_value = nan
    if base_variation is not None and len(df[df[variation_key] == base_variation]):
        # this may still return nan
        sentinel_value = df.loc[df[variation_key] == base_variation][target_metric_key].item()
    if math.isnan(sentinel_value):
        # as a fallback, use the minimal value as the sentinel
        sentinel_value = df.loc[df[target_metric_key] != nan][target_metric_key].min()
    # create diff column if possible
    if not math.isnan(sentinel_value):
        df[diff_key] = df.apply(
            lambda r: round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value)
            if not math.isnan(r[target_metric_key])
            else 0,
            axis="columns",
        )
    # re-order columns
    cols = [variation_key, target_metric_key, diff_key, *report_metric_keys]
    df = df.reindex(cols, axis="columns")  # reorder cols
    # capitalize
    df = df.rename(str.capitalize, axis="columns")
    # make the cols as narrow as possible
    df_github = df.rename(lambda c: c.replace("_", "<br>"), axis="columns")
    df_console = df.rename(lambda c: c.replace("_", "\n"), axis="columns")
    report = ["", "Copy between the cut-here-lines and paste as is to github or a forum"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results:", df_github.to_markdown(index=False, floatfmt=".2f")]
    report += ["```"]
    report += ["*** Setup:", get_versions()]
    report += ["*** The benchmark command line was:", get_original_command()]
    report += ["```"]
    report += ["----------8<-----------------8<--------"]
    report += ["*** Results (console):", df_console.to_markdown(index=False, floatfmt=".2f")]
    print("\n\n".join(report))
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--base-cmd", default=None, type=str, required=True, help="Base cmd")
    parser.add_argument(
        "--variations",
        default=None,
        type=str,
        nargs="+",
        required=True,
        help="Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'",
    )
    parser.add_argument(
        "--base-variation",
        default=None,
        type=str,
        help="Baseline variation to compare to. if None the minimal target value will be used to compare against",
    )
    parser.add_argument(
        "--target-metric-key",
        default=None,
        type=str,
        required=True,
        help="Target metric key in output_dir/all_results.json, e.g., train_samples_per_second",
    )
    parser.add_argument(
        "--report-metric-keys",
        default="",
        type=str,
        help="Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. Use a single argument e.g., 'train_loss train_samples'",
    )
    parser.add_argument(
        "--repeat-times",
        default=1,
        type=int,
        help="How many times to re-run each variation - an average will be reported",
    )
    parser.add_argument(
        "--output_dir",
        default="output_benchmark",
        type=str,
        help="The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked",
    )
    parser.add_argument(
        "--verbose",
        default=False,
        action="store_true",
        help="Whether to show the outputs of each run or just the benchmark progress",
    )
    args = parser.parse_args()

    output_dir = args.output_dir
    Path(output_dir).mkdir(exist_ok=True)
    base_cmd = get_base_command(args, output_dir)

    # split each dimension into its --foo variations
    dims = [list(map(str.strip, re.split(r"\|", x))) for x in args.variations]
    # build a cartesian product of dimensions and convert those back into cmd-line arg strings,
    # while stripping white space for inputs that were empty
    variations = list(map(str.strip, map(" ".join, itertools.product(*dims))))
    longest_variation_len = max(len(x) for x in variations)

    # split wanted keys
    report_metric_keys = args.report_metric_keys.split()

    # capture prints into a log file for convenience
    report_fn = f"benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S')}.txt"
    print(f"\nNote: each run's output is also logged under {output_dir}/log.*.std*.txt")
    print(f"and this script's output is also piped into {report_fn}")
    sys.stdout = Tee(report_fn)

    print(f"\n*** Running {len(variations)} benchmarks:")
    print(f"Base command: {' '.join(base_cmd)}")

    variation_key = "variation"
    results = []
    for id, variation in enumerate(tqdm(variations, desc="Total completion: ", leave=False)):
        cmd = base_cmd + variation.split()
        results.append(
            process_run(
                id + 1,
                cmd,
                variation_key,
                variation,
                longest_variation_len,
                args.target_metric_key,
                report_metric_keys,
                args.repeat_times,
                output_dir,
                args.verbose,
            )
        )

    process_results(results, args.target_metric_key, report_metric_keys, args.base_variation, output_dir)
if __name__ == "__main__":
main()
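
# Example invocation (assumed; adjust the training script and flags to your setup):
#
#   python trainer-benchmark.py \
#       --base-cmd "examples/pytorch/translation/run_translation.py --model_name_or_path t5-small ..." \
#       --target-metric-key train_samples_per_second \
#       --repeat-times 2 \
#       --variations "|--fp16|--bf16" "|--tf32"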
| 326 |
'''simple docstring'''
from __future__ import annotations
from typing import TypedDict
class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    """Return all cyclic rotations of `s`."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    return [s[i:] + s[:i] for i in range(len(s))]
def bwt_transform(s: str) -> BWTTransformDict:
    """Return the BWT string of `s` and the index of `s` among its sorted rotations."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")
    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response
def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Undo the transform given the BWT string and the index of the original rotation."""
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or castable to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError(
            "The parameter idx_original_string must be lower than len(bwt_string)."
        )
    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        # prepend the BWT column and re-sort to reconstruct the rotation table
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
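

# Non-interactive round-trip check; "^BANANA" is the classic textbook example
# and is included here purely for illustration.
def _bwt_round_trip_demo():
    result = bwt_transform("^BANANA")
    assert result["bwt_string"] == "BNN^AAA"
    assert reverse_bwt(result["bwt_string"], result["idx_original_string"]) == "^BANANA"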
if __name__ == "__main__":
    entry_msg = "Provide a string that I will generate its BWT transform: "
    s = input(entry_msg).strip()
    result = bwt_transform(s)
    print(
        f"Burrows Wheeler transform for string '{s}' results "
        f"in '{result['bwt_string']}'"
    )
    original_string = reverse_bwt(result["bwt_string"], result["idx_original_string"])
    print(
        f"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' "
        f"we get original string '{original_string}'"
    )
| 466 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
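
# With the lazy module installed in sys.modules, the heavy imports are deferred
# until first attribute access, e.g. (a sketch; MCTCT is a deprecated model, so
# the exact public import path depends on the installed transformers version):
#
#   from transformers import MCTCTConfig, MCTCTFeatureExtractor
#   config = MCTCTConfig()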
| 708 |
"""simple docstring"""
from statistics import mean, stdev
def normalization(data: list, ndigits: int = 3) -> list:
    """Rescale data to the [0, 1] range (min-max normalization)."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Rescale data to zero mean and unit variance (z-score standardization)."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / (sigma), ndigits) for x in data]
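

# Quick demonstration with illustrative sample values (not part of the original module).
if __name__ == "__main__":
    sample = [2.0, 4.0, 6.0, 8.0]
    print(normalization(sample))  # [0.0, 0.333, 0.667, 1.0]
    print(standardization(sample))  # zero-mean, unit-variance version of `sample`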
| 91 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 18}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
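
# Minimal usage sketch of the processor under test (checkpoint name assumed):
#
#   from transformers import LevitImageProcessor
#   from PIL import Image
#
#   processor = LevitImageProcessor.from_pretrained("facebook/levit-128S")
#   pixel_values = processor(Image.new("RGB", (224, 224)), return_tensors="pt").pixel_values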
| 520 |
import os
import shutil
from pathlib import Path
from typing import Optional, Union
import numpy as np
from huggingface_hub import hf_hub_download
from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging
if is_onnx_available():
import onnxruntime as ort
logger = logging.get_logger(__name__)

ORT_TO_NP_TYPE = {
    "tensor(bool)": np.bool_,
    "tensor(int8)": np.int8,
    "tensor(uint8)": np.uint8,
    "tensor(int16)": np.int16,
    "tensor(uint16)": np.uint16,
    "tensor(int32)": np.int32,
    "tensor(uint32)": np.uint32,
    "tensor(int64)": np.int64,
    "tensor(uint64)": np.uint64,
    "tensor(float16)": np.float16,
    "tensor(float)": np.float32,
    "tensor(double)": np.float64,
}
class OnnxRuntimeModel:
    def __init__(self, model=None, **kwargs):
        logger.info("`diffusers.OnnxRuntimeModel` is experimental and might change in the future.")
        self.model = model
        self.model_save_dir = kwargs.get("model_save_dir", None)
        self.latest_model_name = kwargs.get("latest_model_name", ONNX_WEIGHTS_NAME)

    def __call__(self, **kwargs):
        inputs = {k: np.array(v) for k, v in kwargs.items()}
        return self.model.run(None, inputs)

    @staticmethod
    def load_model(path, provider=None, sess_options=None):
        if provider is None:
            logger.info("No onnxruntime provider specified, using CPUExecutionProvider")
            provider = "CPUExecutionProvider"
        return ort.InferenceSession(path, providers=[provider], sess_options=sess_options)

    def _save_pretrained(self, save_directory, file_name=None, **kwargs):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        src_path = self.model_save_dir.joinpath(self.latest_model_name)
        dst_path = Path(save_directory).joinpath(model_file_name)
        try:
            shutil.copyfile(src_path, dst_path)
        except shutil.SameFileError:
            pass

        # copy external weights (for models >2GB)
        src_path = self.model_save_dir.joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
        if src_path.exists():
            dst_path = Path(save_directory).joinpath(ONNX_EXTERNAL_WEIGHTS_NAME)
            try:
                shutil.copyfile(src_path, dst_path)
            except shutil.SameFileError:
                pass

    def save_pretrained(self, save_directory, **kwargs):
        if os.path.isfile(save_directory):
            logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
            return
        os.makedirs(save_directory, exist_ok=True)
        # saving model weights/files
        self._save_pretrained(save_directory, **kwargs)

    @classmethod
    def _from_pretrained(
        cls,
        model_id,
        use_auth_token=None,
        revision=None,
        force_download=False,
        cache_dir=None,
        file_name=None,
        provider=None,
        sess_options=None,
        **kwargs,
    ):
        model_file_name = file_name if file_name is not None else ONNX_WEIGHTS_NAME
        # load model from local directory
        if os.path.isdir(model_id):
            model = OnnxRuntimeModel.load_model(
                os.path.join(model_id, model_file_name), provider=provider, sess_options=sess_options
            )
            kwargs["model_save_dir"] = Path(model_id)
        # load model from hub
        else:
            # download model
            model_cache_path = hf_hub_download(
                repo_id=model_id,
                filename=model_file_name,
                use_auth_token=use_auth_token,
                revision=revision,
                cache_dir=cache_dir,
                force_download=force_download,
            )
            kwargs["model_save_dir"] = Path(model_cache_path).parent
            kwargs["latest_model_name"] = Path(model_cache_path).name
            model = OnnxRuntimeModel.load_model(model_cache_path, provider=provider, sess_options=sess_options)
        return cls(model=model, **kwargs)

    @classmethod
    def from_pretrained(
        cls,
        model_id,
        force_download=True,
        use_auth_token=None,
        cache_dir=None,
        **model_kwargs,
    ):
        revision = None
        if len(str(model_id).split("@")) == 2:
            model_id, revision = model_id.split("@")
        return cls._from_pretrained(
            model_id=model_id,
            revision=revision,
            cache_dir=cache_dir,
            force_download=force_download,
            use_auth_token=use_auth_token,
            **model_kwargs,
        )
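
# Minimal usage sketch (the repo id below is a placeholder, not a real checkpoint):
#
#   model = OnnxRuntimeModel.from_pretrained("org/some-onnx-model", provider="CPUExecutionProvider")
#   outputs = model(input_ids=np.ones((1, 8), dtype=np.int64))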
| 520 | 1 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size()), [])
    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])
    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _SCREAMING_SNAKE_CASE: List[Any] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Tuple , _SCREAMING_SNAKE_CASE: int , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: str) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Dict = self.num_labels
__lowerCAmelCase : Optional[Any] = DebertaForTokenClassification(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self: Dict , _SCREAMING_SNAKE_CASE: List[str] , _SCREAMING_SNAKE_CASE: Optional[int] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Dict , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Union[str, Any] , _SCREAMING_SNAKE_CASE: Tuple) -> List[str]:
"""simple docstring"""
__lowerCAmelCase : Tuple = DebertaForQuestionAnswering(config=_SCREAMING_SNAKE_CASE)
model.to(_SCREAMING_SNAKE_CASE)
model.eval()
__lowerCAmelCase : str = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def _SCREAMING_SNAKE_CASE ( self: Tuple) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : int = self.prepare_config_and_inputs()
(
__lowerCAmelCase
) : Union[str, Any] = config_and_inputs
__lowerCAmelCase : Tuple = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    # ModelTesterMixin / PipelineTesterMixin are assumed to be imported at the top of this file
    """simple docstring"""

    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    fp16 = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        """simple docstring"""
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_deberta_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        """simple docstring"""
        pass

    @slow
    def test_inference_no_head(self):
        """simple docstring"""
        model = DebertaModel.from_pretrained("microsoft/deberta-base")

        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]])
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f"{output[:, 1:4, 1:4]}") | 701 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = ['torch', 'scipy']
def __init__( self: Tuple , *_SCREAMING_SNAKE_CASE: Dict , **_SCREAMING_SNAKE_CASE: str) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self , ["torch", "scipy"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: Optional[Any] , *_SCREAMING_SNAKE_CASE: Tuple , **_SCREAMING_SNAKE_CASE: Optional[Any]) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls , ["torch", "scipy"])
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: int , *_SCREAMING_SNAKE_CASE: List[Any] , **_SCREAMING_SNAKE_CASE: Dict) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls , ["torch", "scipy"]) | 615 | 0 |
import math
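# Segmented Sieve of Eratosthenes: sieve the primes up to sqrt(n) first, then
# mark off composites in windows of width ~sqrt(n), so memory stays O(sqrt(n))
# instead of O(n).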
def sieve(n: int) -> list[int]:
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
| 66 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
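# Extraction is dispatched by "magic number" sniffing: each extractor class
# declares the leading bytes of its archive format, and `Extractor` reads the
# longest declared prefix from the file to infer which extractor to use.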
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor

    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path"
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))

    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )

    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: str, **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: str, output_path: str) -> None:
        ...


class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: str, magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: str, magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: str, **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path: str):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()


class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: str, magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()


class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)


class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)


class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)


class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: str, output_path: str) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }

    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )

    @staticmethod
    def _read_magic_number(path: str, magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""

    @classmethod
    def is_extractable(cls, path: str, return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)

    @classmethod
    def infer_extractor_format(cls, path):  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format

    @classmethod
    def extract(
        cls,
        input_path,
        output_path,
        extractor_format=None,
        extractor="deprecated",
    ):
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
| 569 | 0 |
'''simple docstring'''
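# Jaro-Winkler similarity: combines the Jaro score (matched characters within a
# sliding window, minus half the transpositions) with a bonus for a common
# prefix of up to 4 characters.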
def jaro_winkler(str1: str, str2: str) -> float:
    '''simple docstring'''

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f'{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}'
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
| 705 |
import math
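# `solution` below walks the corners of a number spiral: a spiral of side
# length j has 2j - 1 numbers on its diagonals, and each new layer adds three
# non-square corners (the fourth corner is the perfect square (j + 2) ** 2).
# This matches Project Euler problem 58 (spiral primes).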
def is_prime(number: int) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    '''simple docstring'''
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 235 | 0 |
import operator
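# Strand sort: repeatedly pull an increasing "strand" out of the unsorted
# input, then merge each strand into the already-sorted solution list.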
def strand_sort(arr: list, reverse: bool = False, solution: list = None) -> list:
    """simple docstring"""
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 491 |
"""simple docstring"""
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
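# Pipeline flow: `preprocess` pairs the image with one hypothesis sentence per
# candidate label, `_forward` runs a CLIP-style image/text model to get
# per-label logits, and `postprocess` turns the logits into sorted
# {"score", "label"} dicts.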
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
"""simple docstring"""
def __init__( self , **lowerCamelCase_) -> Tuple:
super().__init__(**lowerCamelCase_)
requires_backends(self , '''vision''')
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING)
def __call__( self , lowerCamelCase_ , **lowerCamelCase_) -> Optional[int]:
return super().__call__(lowerCamelCase_ , **lowerCamelCase_)
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F'Unsupported framework: {self.framework}')

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
return result | 34 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
lowerCAmelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 718 |
"""simple docstring"""
from __future__ import annotations
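# Slowsort ("multiply and surrender"): recursively sort both halves, move the
# maximum to the end, then sort everything but that maximum again; deliberately
# pessimal, useful only as a teaching example.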
def slowsort(sequence: list, start: int | None = None, end: int | None = None) -> None:
    if start is None:
        start = 0

    if end is None:
        end = len(sequence) - 1

    if start >= end:
        return

    mid = (start + end) // 2

    slowsort(sequence, start, mid)
    slowsort(sequence, mid + 1, end)

    if sequence[end] < sequence[mid]:
        sequence[end], sequence[mid] = sequence[mid], sequence[end]

    slowsort(sequence, start, end - 1)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 533 | 0 |
'''simple docstring'''
from math import sqrt
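# Project Euler problem 7 (10001st prime): 6k +/- 1 trial division is enough
# here because every prime above 3 is adjacent to a multiple of 6.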
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(F"""{solution() = }""")
| 5 | """simple docstring"""
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    """simple docstring"""

    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''test''',
'''question''',
'''this''',
'''is''',
'''the''',
'''first''',
'''second''',
'''third''',
'''fourth''',
'''fifth''',
'''record''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)
    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''question''': ['''foo''', '''bar'''],
'''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
} )
return dataset
    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever
    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"], add_special_tokens=False, return_token_type_ids=False, return_attention_mask=False
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )
        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)

    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")

        self.assertEqual(retriever.block_records[0], b"This is the first record")
| 434 | 0 |
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_CITATION = "\\n@article{wang2019superglue,\n title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},\n author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},\n journal={arXiv preprint arXiv:1905.00537},\n year={2019}\n}\n"
_DESCRIPTION = "\\nSuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after\nGLUE with a new set of more difficult language understanding tasks, improved\nresources, and a new public leaderboard.\n"
_KWARGS_DESCRIPTION = "\nCompute SuperGLUE evaluation metric associated to each SuperGLUE dataset.\nArgs:\n predictions: list of predictions to score. Depending on the SuperGlUE subset:\n - for 'record': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'prediction_text': the predicted answer text\n - for 'multirc': list of question-answer dictionaries with the following keys:\n - 'idx': index of the question-answer pair as specified by the dataset\n - 'prediction': the predicted answer label\n - otherwise: list of predicted labels\n references: list of reference labels. Depending on the SuperGLUE subset:\n - for 'record': list of question-answers dictionaries with the following keys:\n - 'idx': index of the question as specified by the dataset\n - 'answers': list of possible answers\n - otherwise: list of reference labels\nReturns: depending on the SuperGLUE subset:\n - for 'record':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1': F1 score\n - for 'multirc':\n - 'exact_match': Exact match between answer and gold answer\n - 'f1_m': Per-question macro-F1 score\n - 'f1_a': Average F1 score over all answers\n - for 'axb':\n 'matthews_correlation': Matthew Correlation\n - for 'cb':\n - 'accuracy': Accuracy\n - 'f1': F1 score\n - for all others:\n - 'accuracy': Accuracy\nExamples:\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'copa') # any of [\"copa\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"boolq\", \"axg\"]\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'cb')\n >>> predictions = [0, 1]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'record')\n >>> predictions = [{'idx': {'passage': 0, 'query': 0}, 'prediction_text': 'answer'}]\n >>> references = [{'idx': {'passage': 0, 'query': 0}, 'answers': ['answer', 'another_answer']}]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'multirc')\n >>> predictions = [{'idx': {'answer': 0, 'paragraph': 0, 'question': 0}, 'prediction': 0}, {'idx': {'answer': 1, 'paragraph': 2, 'question': 3}, 'prediction': 1}]\n >>> references = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'exact_match': 1.0, 'f1_m': 1.0, 'f1_a': 1.0}\n\n >>> super_glue_metric = datasets.load_metric('super_glue', 'axb')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = super_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'matthews_correlation': 1.0}\n"
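# Helper metrics: plain accuracy, accuracy combined with F1, and the MultiRC
# evaluation, which groups answer predictions by question before scoring.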
def simple_accuracy(preds, labels):
    """simple docstring"""
    return float((preds == labels).mean())


def acc_and_f1(preds, labels, f1_avg="binary"):
    """simple docstring"""
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds, average=f1_avg))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def evaluate_multirc(ids_preds, labels):
    """simple docstring"""
    question_map = {}
    for id_pred, label in zip(ids_preds, labels):
        question_id = f"{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"
        pred = id_pred["prediction"]
        if question_id in question_map:
            question_map[question_id].append((pred, label))
        else:
            question_map[question_id] = [(pred, label)]
    f1s, ems = [], []
    for question, preds_labels in question_map.items():
        question_preds, question_labels = zip(*preds_labels)
        f1 = f1_score(y_true=question_labels, y_pred=question_preds, average="macro")
        f1s.append(f1)
        em = int(sum(pred == label for pred, label in preds_labels) == len(preds_labels))
        ems.append(em)
    f1_m = float(sum(f1s) / len(f1s))
    em = sum(ems) / len(ems)
    f1_a = float(f1_score(y_true=labels, y_pred=[id_pred["prediction"] for id_pred in ids_preds]))
    return {"exact_match": em, "f1_m": f1_m, "f1_a": f1_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
    def _info(self):
"""simple docstring"""
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]''')
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types()) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if not self.config_name == '''record''' and not self.config_name == '''multirc''' else None , )
    def _get_feature_types(self):
"""simple docstring"""
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value('''int64'''),
"query": datasets.Value('''int64'''),
},
"prediction_text": datasets.Value('''string'''),
},
"references": {
"idx": {
"passage": datasets.Value('''int64'''),
"query": datasets.Value('''int64'''),
},
"answers": datasets.Sequence(datasets.Value('''string''')),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value('''int64'''),
"paragraph": datasets.Value('''int64'''),
"question": datasets.Value('''int64'''),
},
"prediction": datasets.Value('''int64'''),
},
"references": datasets.Value('''int64'''),
}
else:
return {
"predictions": datasets.Value('''int64'''),
"references": datasets.Value('''int64'''),
}
    def _compute(self, predictions, references):
        """simple docstring"""
        if self.config_name == "axb":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "cb":
            return acc_and_f1(predictions, references, f1_avg="macro")
        elif self.config_name == "record":
            dataset = [
                {
                    "qas": [
                        {"id": ref["idx"]["query"], "answers": [{"text": ans} for ans in ref["answers"]]}
                        for ref in references
                    ]
                }
            ]
            predictions = {pred["idx"]["query"]: pred["prediction_text"] for pred in predictions}
            return evaluate_record(dataset, predictions)[0]
        elif self.config_name == "multirc":
            return evaluate_multirc(predictions, references)
        elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["boolq", "cb", "copa", "multirc", "record", "rte", "wic", "wsc", "wsc.fixed", "axb", "axg",]'
            )
| 707 |
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
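# Nearest-neighbour resize: every destination pixel (i, j) copies the source
# pixel at (int(i * src_h / dst_h), int(j * src_w / dst_w)).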
class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        """simple docstring"""
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        """simple docstring"""
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """simple docstring"""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """simple docstring"""
        return int(self.ratio_y * y)
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
)
waitKey(0)
destroyAllWindows()
| 169 | 0 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class DatasetScriptCodeStyleTest(TestCase):  # class name restored descriptively; helper names are pinned by the test methods below
    def _no_encoding_on_file_open(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 344 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
import torch
from ..models.auto import AutoModelForVisualQuestionAnswering, AutoProcessor
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
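# PipelineTool contract: `encode` builds model inputs from the raw image and
# question, `forward` runs the VQA model, and `decode` maps the argmax logit
# back to a human-readable label.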
class ImageQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "dandelin/vilt-b32-finetuned-vqa"
    description = (
        "This is a tool that answers a question about an image. It takes an input named `image` which should be the "
        "image containing the information, as well as a `question` which should be the question in English. It "
        "returns a text that is the answer to the question."
    )
    name = "image_qa"
    pre_processor_class = AutoProcessor
    model_class = AutoModelForVisualQuestionAnswering

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", question: str):
        return self.pre_processor(image, question, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            return self.model(**inputs).logits

    def decode(self, outputs):
        idx = outputs.argmax(-1).item()
        return self.model.config.id2label[idx]
| 344 | 1 |
from functools import reduce
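# Project Euler problem 8: slide a window of 13 adjacent digits across the
# 1000-digit number and keep the largest digit product.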
N = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def solution(n: str = N) -> int:
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(F"""{solution() = }""") | 711 |
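# Project Euler problem 40: Champernowne's constant is "0.123456789101112...";
# the solution multiplies the digits at positions 1, 10, 100, ..., 1_000_000.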
def solution():
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[9_9999] )
* int(constant[99_9999] )
)
if __name__ == "__main__":
print(solution()) | 678 | 0 |
from __future__ import annotations
from collections.abc import Generator
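# Incremental sieve: `factor_map` lazily maps each upcoming composite to one of
# its prime factors, so primes can be generated without a fixed upper bound.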
def sieve() -> Generator[int, None, None]:
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2
if __name__ == "__main__":
print(solution())
| 145 |
'''simple docstring'''
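# Catalan numbers via dynamic programming: C(0) = C(1) = 1 and
# C(i) = sum_{j=0}^{i-1} C(j) * C(i-j-1), giving O(n^2) time and O(n) space.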
def catalan_numbers(upper_limit: int) -> list:
    '''simple docstring'''
    if upper_limit < 0:
        raise ValueError('Limit for the Catalan sequence must be ≥ 0')

    catalan_list = [0] * (upper_limit + 1)

    # Base case: C(0) = C(1) = 1
    catalan_list[0] = 1
    if upper_limit > 0:
        catalan_list[1] = 1

    # Recurrence relation: C(i) = sum(C(j).C(i-j-1)), from j = 0 to i
    for i in range(2, upper_limit + 1):
        for j in range(i):
            catalan_list[i] += catalan_list[j] * catalan_list[i - j - 1]

    return catalan_list
return catalan_list
if __name__ == "__main__":
print('''\n********* Catalan Numbers Using Dynamic Programming ************\n''')
print('''\n*** Enter -1 at any time to quit ***''')
print('''\nEnter the upper limit (≥ 0) for the Catalan number sequence: ''', end='''''')
try:
while True:
            N = int(input().strip())
if N < 0:
print('''\n********* Goodbye!! ************''')
break
else:
print(F'''The Catalan numbers from 0 through {N} are:''')
print(catalan_numbers(N))
print('''Try another upper limit for the sequence: ''', end='''''')
except (NameError, ValueError):
print('''\n********* Invalid input, goodbye! ************\n''')
import doctest
doctest.testmod()
| 119 | 0 |
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
| 715 |
DOOMSDAY_NOT_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: '''Wednesday''',
4: '''Thursday''',
5: '''Friday''',
6: '''Saturday''',
}
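# Conway's Doomsday rule: compute the century anchor day, derive the year's
# "doomsday", then offset by the month's memorised doomsday date to get the
# weekday of any date.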
def get_week_day(year: int, month: int, day: int) -> str:
    '''simple docstring'''
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) == 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 32 | 0 |
"""simple docstring"""
import argparse
import os
from . import (
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
AlbertConfig,
BartConfig,
BertConfig,
CamembertConfig,
CTRLConfig,
DistilBertConfig,
DPRConfig,
ElectraConfig,
FlaubertConfig,
GPTaConfig,
LayoutLMConfig,
LxmertConfig,
OpenAIGPTConfig,
RobertaConfig,
TaConfig,
TFAlbertForPreTraining,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFCamembertForMaskedLM,
TFCTRLLMHeadModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
TFElectraForPreTraining,
TFFlaubertWithLMHeadModel,
TFGPTaLMHeadModel,
TFLayoutLMForMaskedLM,
TFLxmertForPreTraining,
TFLxmertVisualFeatureEncoder,
TFOpenAIGPTLMHeadModel,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForSequenceClassification,
TFTaForConditionalGeneration,
TFTransfoXLLMHeadModel,
TFWavaVecaModel,
TFXLMRobertaForMaskedLM,
TFXLMWithLMHeadModel,
TFXLNetLMHeadModel,
TransfoXLConfig,
WavaVecaConfig,
WavaVecaModel,
XLMConfig,
XLMRobertaConfig,
XLNetConfig,
is_torch_available,
load_pytorch_checkpoint_in_tfa_model,
)
from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging
if is_torch_available():
import numpy as np
import torch
from . import (
AlbertForPreTraining,
BartForConditionalGeneration,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
CamembertForMaskedLM,
CTRLLMHeadModel,
DistilBertForMaskedLM,
DistilBertForQuestionAnswering,
DPRContextEncoder,
DPRQuestionEncoder,
DPRReader,
ElectraForPreTraining,
FlaubertWithLMHeadModel,
GPTaLMHeadModel,
LayoutLMForMaskedLM,
LxmertForPreTraining,
LxmertVisualFeatureEncoder,
OpenAIGPTLMHeadModel,
RobertaForMaskedLM,
RobertaForSequenceClassification,
TaForConditionalGeneration,
TransfoXLLMHeadModel,
XLMRobertaForMaskedLM,
XLMWithLMHeadModel,
XLNetLMHeadModel,
)
logging.set_verbosity_info()
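# Each MODEL_CLASSES entry bundles a config class, the TF and PyTorch model
# classes, and the pretrained archive map(s) used to resolve shortcut names.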
MODEL_CLASSES = {
"""bart""": (
BartConfig,
TFBartForConditionalGeneration,
TFBartForSequenceClassification,
BartForConditionalGeneration,
BART_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""bert""": (
BertConfig,
TFBertForPreTraining,
BertForPreTraining,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-uncased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-large-cased-whole-word-masking-finetuned-squad""": (
BertConfig,
TFBertForQuestionAnswering,
BertForQuestionAnswering,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""bert-base-cased-finetuned-mrpc""": (
BertConfig,
TFBertForSequenceClassification,
BertForSequenceClassification,
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""dpr""": (
DPRConfig,
TFDPRQuestionEncoder,
TFDPRContextEncoder,
TFDPRReader,
DPRQuestionEncoder,
DPRContextEncoder,
DPRReader,
DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""gpt2""": (
GPTaConfig,
TFGPTaLMHeadModel,
GPTaLMHeadModel,
GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlnet""": (
XLNetConfig,
TFXLNetLMHeadModel,
XLNetLMHeadModel,
XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm""": (
XLMConfig,
TFXLMWithLMHeadModel,
XLMWithLMHeadModel,
XLM_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""xlm-roberta""": (
XLMRobertaConfig,
TFXLMRobertaForMaskedLM,
XLMRobertaForMaskedLM,
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""transfo-xl""": (
TransfoXLConfig,
TFTransfoXLLMHeadModel,
TransfoXLLMHeadModel,
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""openai-gpt""": (
OpenAIGPTConfig,
TFOpenAIGPTLMHeadModel,
OpenAIGPTLMHeadModel,
OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""roberta""": (
RobertaConfig,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
RobertaForMaskedLM,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""layoutlm""": (
LayoutLMConfig,
TFLayoutLMForMaskedLM,
LayoutLMForMaskedLM,
LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
),
"""roberta-large-mnli""": (
RobertaConfig,
TFRobertaForSequenceClassification,
RobertaForSequenceClassification,
ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""camembert""": (
CamembertConfig,
TFCamembertForMaskedLM,
CamembertForMaskedLM,
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""flaubert""": (
FlaubertConfig,
TFFlaubertWithLMHeadModel,
FlaubertWithLMHeadModel,
FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert""": (
DistilBertConfig,
TFDistilBertForMaskedLM,
DistilBertForMaskedLM,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""distilbert-base-distilled-squad""": (
DistilBertConfig,
TFDistilBertForQuestionAnswering,
DistilBertForQuestionAnswering,
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert""": (
LxmertConfig,
TFLxmertForPreTraining,
LxmertForPreTraining,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""lxmert-visual-feature-encoder""": (
LxmertConfig,
TFLxmertVisualFeatureEncoder,
LxmertVisualFeatureEncoder,
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""ctrl""": (
CTRLConfig,
TFCTRLLMHeadModel,
CTRLLMHeadModel,
CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""albert""": (
AlbertConfig,
TFAlbertForPreTraining,
AlbertForPreTraining,
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""t5""": (
TaConfig,
TFTaForConditionalGeneration,
TaForConditionalGeneration,
T5_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""electra""": (
ElectraConfig,
TFElectraForPreTraining,
ElectraForPreTraining,
ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
"""wav2vec2""": (
WavaVecaConfig,
TFWavaVecaModel,
WavaVecaModel,
WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
),
}
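# --- Hedged aside (not part of the original script): the MODEL_CLASSES dict
# above is a plain registry/dispatch table; the conversion code looks a
# model-type string up and fails fast on unknown keys. A minimal, self-contained
# sketch of the same pattern, with made-up registry contents:
TOY_REGISTRY = {"bert": ("BertConfig", "TFBertForPreTraining", "BertForPreTraining")}

def toy_lookup(model_type):
    if model_type not in TOY_REGISTRY:
        raise ValueError(f"Unrecognized model type, should be one of {list(TOY_REGISTRY.keys())}.")
    return TOY_REGISTRY[model_type]

print(toy_lookup("bert"))  # ('BertConfig', 'TFBertForPreTraining', 'BertForPreTraining')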
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase=False , UpperCamelCase=True ) -> Optional[Any]:
"""simple docstring"""
if model_type not in MODEL_CLASSES:
raise ValueError(f"Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}." )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Dict = MODEL_CLASSES[model_type]
# Initialise TF model
if config_file in aws_config_map:
__UpperCAmelCase : List[str] = cached_file(UpperCamelCase , UpperCamelCase , force_download=not use_cached_models )
__UpperCAmelCase : Tuple = config_class.from_json_file(UpperCamelCase )
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : str = True
print(f"Building TensorFlow model from configuration: {config}" )
__UpperCAmelCase : Union[str, Any] = model_class(UpperCamelCase )
# Load weights from tf checkpoint
if pytorch_checkpoint_path in aws_config_map.keys():
__UpperCAmelCase : int = cached_file(
UpperCamelCase , UpperCamelCase , force_download=not use_cached_models )
# Load PyTorch checkpoint in tf2 model:
__UpperCAmelCase : Union[str, Any] = load_pytorch_checkpoint_in_tfa_model(UpperCamelCase , UpperCamelCase )
if compare_with_pt_model:
__UpperCAmelCase : int = tf_model(tf_model.dummy_inputs , training=UpperCamelCase ) # build the network
__UpperCAmelCase : str = torch.load(UpperCamelCase , map_location="cpu" )
__UpperCAmelCase : List[Any] = pt_model_class.from_pretrained(
pretrained_model_name_or_path=UpperCamelCase , config=UpperCamelCase , state_dict=UpperCamelCase )
with torch.no_grad():
__UpperCAmelCase : Dict = pt_model(**pt_model.dummy_inputs )
__UpperCAmelCase : Optional[int] = pto[0].numpy()
__UpperCAmelCase : List[str] = tfo[0].numpy()
__UpperCAmelCase : List[Any] = np.amax(np.abs(np_pt - np_tf ) )
print(f"Max absolute difference between models outputs {diff}" )
assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"
    # Save the TensorFlow model weights
print(f"Save TensorFlow model to {tf_dump_path}" )
tf_model.save_weights(UpperCamelCase , save_format="h5" )
def _UpperCamelCase ( UpperCamelCase , UpperCamelCase , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=False , UpperCamelCase=False , ) -> Any:
"""simple docstring"""
if args_model_type is None:
__UpperCAmelCase : Dict = list(MODEL_CLASSES.keys() )
else:
__UpperCAmelCase : List[Any] = [args_model_type]
for j, model_type in enumerate(UpperCamelCase , start=1 ):
print("=" * 100 )
print(f" Converting model type {j}/{len(UpperCamelCase )}: {model_type}" )
print("=" * 100 )
if model_type not in MODEL_CLASSES:
raise ValueError(f"Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}." )
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : List[Any] = MODEL_CLASSES[model_type]
if model_shortcut_names_or_path is None:
__UpperCAmelCase : List[Any] = list(aws_model_maps.keys() )
if config_shortcut_names_or_path is None:
__UpperCAmelCase : int = model_shortcut_names_or_path
for i, (model_shortcut_name, config_shortcut_name) in enumerate(
zip(UpperCamelCase , UpperCamelCase ) , start=1 ):
print("-" * 100 )
if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name:
if not only_convert_finetuned_models:
print(f" Skipping finetuned checkpoint {model_shortcut_name}" )
continue
__UpperCAmelCase : Optional[Any] = model_shortcut_name
elif only_convert_finetuned_models:
print(f" Skipping not finetuned checkpoint {model_shortcut_name}" )
continue
print(
f" Converting checkpoint {i}/{len(UpperCamelCase )}: {model_shortcut_name} - model_type {model_type}" )
print("-" * 100 )
if config_shortcut_name in aws_config_map:
__UpperCAmelCase : Dict = cached_file(UpperCamelCase , UpperCamelCase , force_download=not use_cached_models )
else:
__UpperCAmelCase : Optional[Any] = config_shortcut_name
if model_shortcut_name in aws_model_maps:
__UpperCAmelCase : int = cached_file(UpperCamelCase , UpperCamelCase , force_download=not use_cached_models )
else:
__UpperCAmelCase : List[str] = model_shortcut_name
if os.path.isfile(UpperCamelCase ):
__UpperCAmelCase : Tuple = "converted_model"
convert_pt_checkpoint_to_tf(
model_type=UpperCamelCase , pytorch_checkpoint_path=UpperCamelCase , config_file=UpperCamelCase , tf_dump_path=os.path.join(UpperCamelCase , model_shortcut_name + "-tf_model.h5" ) , compare_with_pt_model=UpperCamelCase , )
if remove_cached_files:
os.remove(UpperCamelCase )
os.remove(UpperCamelCase )
if __name__ == "__main__":
A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_dump_path""", default=None, type=str, required=True, help="""Path to the output Tensorflow dump file."""
)
parser.add_argument(
"""--model_type""",
default=None,
type=str,
help=(
f'''Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and '''
"""convert all the models from AWS."""
),
)
parser.add_argument(
"""--pytorch_checkpoint_path""",
default=None,
type=str,
help=(
"""Path to the PyTorch checkpoint path or shortcut name to download from AWS. """
"""If not given, will download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
help=(
"""The config json file corresponding to the pre-trained model. \n"""
"""This specifies the model architecture. If not given and """
"""--pytorch_checkpoint_path is not given or is a shortcut name """
"""use the configuration associated to the shortcut name on the AWS"""
),
)
parser.add_argument(
"""--compare_with_pt_model""", action="""store_true""", help="""Compare Tensorflow and PyTorch model predictions."""
)
parser.add_argument(
"""--use_cached_models""",
action="""store_true""",
help="""Use cached models if possible instead of updating to latest checkpoint versions.""",
)
parser.add_argument(
"""--remove_cached_files""",
action="""store_true""",
help="""Remove pytorch models after conversion (save memory when converting in batches).""",
)
parser.add_argument("""--only_convert_finetuned_models""", action="""store_true""", help="""Only convert finetuned models.""")
A = parser.parse_args()
# if args.pytorch_checkpoint_path is not None:
# convert_pt_checkpoint_to_tf(args.model_type.lower(),
# args.pytorch_checkpoint_path,
# args.config_file if args.config_file is not None else args.pytorch_checkpoint_path,
# args.tf_dump_path,
# compare_with_pt_model=args.compare_with_pt_model,
# use_cached_models=args.use_cached_models)
# else:
convert_all_pt_checkpoints_to_tf(
args.model_type.lower() if args.model_type is not None else None,
args.tf_dump_path,
model_shortcut_names_or_path=[args.pytorch_checkpoint_path]
if args.pytorch_checkpoint_path is not None
else None,
config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None,
compare_with_pt_model=args.compare_with_pt_model,
use_cached_models=args.use_cached_models,
remove_cached_files=args.remove_cached_files,
only_convert_finetuned_models=args.only_convert_finetuned_models,
)
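# --- Hedged illustration (standalone, not from the original file): the
# conversion above validates ported weights with a max-absolute-difference
# check between the two frameworks' outputs. The bare mechanics using only
# NumPy (array values are made up for the example):
import numpy as np

pt_out = np.array([0.1234, -0.5678, 0.9012])  # stand-in for PyTorch logits
tf_out = np.array([0.1233, -0.5679, 0.9013])  # stand-in for TensorFlow logits
diff = np.amax(np.abs(pt_out - tf_out))
assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"
print(f"Max absolute difference between models outputs {diff}")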
| 77 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_UpperCamelCase : Optional[int] = {
"configuration_vision_text_dual_encoder": ["VisionTextDualEncoderConfig"],
"processing_vision_text_dual_encoder": ["VisionTextDualEncoderProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Dict = ["VisionTextDualEncoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : int = ["FlaxVisionTextDualEncoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase : Tuple = ["TFVisionTextDualEncoderModel"]
if TYPE_CHECKING:
from .configuration_vision_text_dual_encoder import VisionTextDualEncoderConfig
from .processing_vision_text_dual_encoder import VisionTextDualEncoderProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_text_dual_encoder import VisionTextDualEncoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_text_dual_encoder import FlaxVisionTextDualEncoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_text_dual_encoder import TFVisionTextDualEncoderModel
else:
import sys
_UpperCamelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure)
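# --- Hedged toy version (assumption: a simplified stand-in, not the real
# transformers `_LazyModule`): the pattern above registers submodules in
# `_import_structure` and defers importing heavy backends (torch/flax/tf)
# until an attribute is first accessed.
import importlib
import types

class _ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module(self._attr_to_module[attr])  # imported lazily, on first use
        return getattr(module, attr)

lazy_json = _ToyLazyModule("lazy_json", {"json": ["dumps"]})
print(lazy_json.dumps({"lazy": True}))  # the real `json` module is imported only here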
| 284 | 0 |
"""simple docstring"""
import os
from pathlib import Path
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
lowerCAmelCase__ : Union[str, Any] = {
"""en""": """Machine learning is great, isn't it?""",
"""ru""": """Машинное обучение - это здорово, не так ли?""",
"""de""": """Maschinelles Lernen ist großartig, oder?""",
}
    # BLEU scores as follows:
# "pair": [fairseq, transformers]
lowerCAmelCase__ : int = {
"""ru-en""": ["""[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)""", """39.20"""],
"""en-ru""": ["""[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)""", """33.47"""],
"""en-de""": ["""[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)""", """42.83"""],
"""de-en""": ["""[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)""", """41.35"""],
}
lowerCAmelCase__ : str = f"""{src_lang}-{tgt_lang}"""
lowerCAmelCase__ : List[str] = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation.
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original model (and this ported version) doesn't seem to handle inputs with repeated sub-phrases well; [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
lowerCAmelCase__ : Tuple = os.path.join(__UpperCAmelCase , """README.md""" )
print(f"""Generating {path}""" )
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(__UpperCAmelCase )
# make sure we are under the root of the project
_A = Path(__file__).resolve().parent.parent.parent
_A = repo_dir / """model_cards"""
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
_A , _A , _A = model_name.split("""-""")
_A = model_cards_dir / """facebook""" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
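# --- Hedged usage sketch (a separate toy, not the original function): the
# generator above is f-string templating plus one README write per model.
# The same write pattern in isolation, using a temporary directory and
# illustrative card content:
import os
import tempfile

def toy_write_card(model_card_dir, src_lang, tgt_lang):
    content = f"# FSMT {src_lang}-{tgt_lang}\n\nPorted fairseq WMT19 checkpoint.\n"
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    with open(path, "w", encoding="utf-8") as f:
        f.write(content)
    return path

with tempfile.TemporaryDirectory() as tmp:
    print(toy_write_card(os.path.join(tmp, "facebook", "wmt19-en-ru"), "en", "ru"))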
| 709 |
"""simple docstring"""
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
_A = logging.get_logger()
@dataclass
class _lowerCamelCase :
_lowerCamelCase :nn.Module
_lowerCamelCase :List[nn.Module] = field(default_factory=a_ )
_lowerCamelCase :list = field(default_factory=a_ )
def _lowerCAmelCase ( self : List[Any] , UpperCamelCase : str , UpperCamelCase : Tensor , UpperCamelCase : Tensor ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Dict = len(list(m.modules() ) ) == 1 or isinstance(UpperCamelCase , nn.Convad ) or isinstance(UpperCamelCase , nn.BatchNormad )
if has_not_submodules:
self.traced.append(UpperCamelCase )
def __call__( self : int , UpperCamelCase : Tensor ) -> Tuple:
"""simple docstring"""
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(UpperCamelCase )
[x.remove() for x in self.handles]
return self
@property
def _lowerCAmelCase ( self : Tuple ) -> List[str]:
"""simple docstring"""
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda UpperCamelCase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
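# --- Hedged illustration (self-contained, arbitrary layer sizes): the Tracker
# above relies on register_forward_hook firing once per module during a forward
# pass, which yields the leaf ("no submodules") layers in execution order.
import torch
import torch.nn as nn

_toy_net = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))
_seen = []
_handles = [
    m.register_forward_hook(lambda mod, inp, out: _seen.append(type(mod).__name__))
    for m in _toy_net.modules()
    if len(list(m.children())) == 0
]
_toy_net(torch.randn(1, 4))
[h.remove() for h in _handles]
print(_seen)  # ['Linear', 'ReLU', 'Linear']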
@dataclass
class _lowerCamelCase :
_lowerCamelCase :nn.Module
_lowerCamelCase :nn.Module
_lowerCamelCase :int = 0
_lowerCamelCase :List = field(default_factory=a_ )
_lowerCamelCase :List = field(default_factory=a_ )
def __call__( self : str , UpperCamelCase : Tensor ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Union[str, Any] = Tracker(self.dest )(UpperCamelCase ).parametrized
lowerCAmelCase__ : Union[str, Any] = Tracker(self.src )(UpperCamelCase ).parametrized
lowerCAmelCase__ : Any = list(filter(lambda UpperCamelCase : type(UpperCamelCase ) not in self.src_skip , UpperCamelCase ) )
lowerCAmelCase__ : int = list(filter(lambda UpperCamelCase : type(UpperCamelCase ) not in self.dest_skip , UpperCamelCase ) )
if len(UpperCamelCase ) != len(UpperCamelCase ):
raise Exception(
f"""Numbers of operations are different. Source module has {len(UpperCamelCase )} operations while"""
f""" destination module has {len(UpperCamelCase )}.""" )
for dest_m, src_m in zip(UpperCamelCase , UpperCamelCase ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
                print(f"""Transferred from={src_m} to={dest_m}""" )
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = True ) -> List[str]:
print(f"""Converting {name}...""" )
with torch.no_grad():
lowerCAmelCase__ : Any = timm.create_model(__UpperCAmelCase , pretrained=__UpperCAmelCase ).eval()
lowerCAmelCase__ : int = ResNetForImageClassification(__UpperCAmelCase ).eval()
lowerCAmelCase__ : List[str] = ModuleTransfer(src=__UpperCAmelCase , dest=__UpperCAmelCase )
lowerCAmelCase__ : str = torch.randn((1, 3, 224, 224) )
module_transfer(__UpperCAmelCase )
assert torch.allclose(from_model(__UpperCAmelCase ) , our_model(__UpperCAmelCase ).logits ), "The model logits don't match the original one."
lowerCAmelCase__ : int = f"""resnet{'-'.join(name.split('resnet' ) )}"""
print(__UpperCAmelCase )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add model""" , use_temp_dir=__UpperCAmelCase , )
# we can use the convnext one
lowerCAmelCase__ : Tuple = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message="""Add image processor""" , use_temp_dir=__UpperCAmelCase , )
print(f"""Pushed {checkpoint_name}""" )
def lowercase_ ( __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = True ) -> List[str]:
lowerCAmelCase__ : Dict = """imagenet-1k-id2label.json"""
lowerCAmelCase__ : Any = 1000
lowerCAmelCase__ : Optional[int] = (1, num_labels)
lowerCAmelCase__ : List[Any] = """huggingface/label-files"""
lowerCAmelCase__ : int = num_labels
lowerCAmelCase__ : Any = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type="""dataset""" ) , """r""" ) )
lowerCAmelCase__ : Optional[Any] = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
lowerCAmelCase__ : Optional[int] = idalabel
lowerCAmelCase__ : Optional[Any] = {v: k for k, v in idalabel.items()}
lowerCAmelCase__ : Union[str, Any] = partial(__UpperCAmelCase , num_labels=__UpperCAmelCase , idalabel=__UpperCAmelCase , labelaid=__UpperCAmelCase )
lowerCAmelCase__ : List[Any] = {
"""resnet18""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ),
"""resnet26""": ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet34""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type="""basic""" ),
"""resnet50""": ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet101""": ImageNetPreTrainedConfig(
depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
"""resnet152""": ImageNetPreTrainedConfig(
depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type="""bottleneck""" ),
}
if model_name:
convert_weight_and_push(__UpperCAmelCase , names_to_config[model_name] , __UpperCAmelCase , __UpperCAmelCase )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return config, expected_shape
if __name__ == "__main__":
_A = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help=(
"""The name of the model you wish to convert, it must be one of the supported resnet* architecture,"""
""" currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=Path,
required=True,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""",
default=True,
type=bool,
required=False,
help="""If True, push model and image processor to the hub.""",
)
_A = parser.parse_args()
_A = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
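# --- Hedged sketch of the state_dict-based weight copy that ModuleTransfer
# performs above, reduced to a single matched pair of layers (toy sizes):
import torch
import torch.nn as nn

_src = nn.Linear(3, 3)
_dest = nn.Linear(3, 3)
_dest.load_state_dict(_src.state_dict())  # copy parameters src -> dest

_x = torch.randn(2, 3)
assert torch.allclose(_src(_x), _dest(_x)), "outputs should match after the copy"
print("weights transferred, outputs match")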
| 507 | 0 |
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class __A :
def __init__( self :Union[str, Any] , __snake_case :Tuple , __snake_case :List[Any]=2 , __snake_case :str=3 , __snake_case :List[str]=4 , __snake_case :Optional[int]=2 , __snake_case :int=7 , __snake_case :Optional[int]=True , __snake_case :List[Any]=True , __snake_case :Any=True , __snake_case :Any=True , __snake_case :Union[str, Any]=99 , __snake_case :int=36 , __snake_case :int=3 , __snake_case :Union[str, Any]=4 , __snake_case :List[str]=37 , __snake_case :Any="gelu" , __snake_case :str=0.1 , __snake_case :str=0.1 , __snake_case :Union[str, Any]=5_12 , __snake_case :Optional[Any]=16 , __snake_case :int=2 , __snake_case :List[Any]=0.02 , __snake_case :List[str]=6 , __snake_case :Tuple=6 , __snake_case :Any=3 , __snake_case :Any=4 , __snake_case :str=None , __snake_case :Tuple=10_00 , ):
'''simple docstring'''
__magic_name__ : int =parent
__magic_name__ : List[str] =batch_size
__magic_name__ : int =num_channels
__magic_name__ : Optional[int] =image_size
__magic_name__ : Any =patch_size
__magic_name__ : Tuple =text_seq_length
__magic_name__ : List[str] =is_training
__magic_name__ : str =use_input_mask
__magic_name__ : Optional[int] =use_token_type_ids
__magic_name__ : Union[str, Any] =use_labels
__magic_name__ : Dict =vocab_size
__magic_name__ : List[Any] =hidden_size
__magic_name__ : Optional[int] =num_hidden_layers
__magic_name__ : Optional[Any] =num_attention_heads
__magic_name__ : List[str] =intermediate_size
__magic_name__ : Any =hidden_act
__magic_name__ : str =hidden_dropout_prob
__magic_name__ : Any =attention_probs_dropout_prob
__magic_name__ : Optional[int] =max_position_embeddings
__magic_name__ : List[Any] =type_vocab_size
__magic_name__ : Optional[int] =type_sequence_label_size
__magic_name__ : List[Any] =initializer_range
__magic_name__ : int =coordinate_size
__magic_name__ : Dict =shape_size
__magic_name__ : Union[str, Any] =num_labels
__magic_name__ : Tuple =num_choices
__magic_name__ : Tuple =scope
__magic_name__ : Optional[Any] =range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
__magic_name__ : Any =text_seq_length
__magic_name__ : Optional[int] =(image_size // patch_size) ** 2 + 1
__magic_name__ : Optional[Any] =self.text_seq_length + self.image_seq_length
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : Optional[Any] =ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
__magic_name__ : Any =ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__magic_name__ : str =bbox[i, j, 3]
__magic_name__ : Optional[int] =bbox[i, j, 1]
__magic_name__ : Optional[int] =t
if bbox[i, j, 2] < bbox[i, j, 0]:
__magic_name__ : Dict =bbox[i, j, 2]
__magic_name__ : Optional[int] =bbox[i, j, 0]
__magic_name__ : Optional[Any] =t
__magic_name__ : int =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__magic_name__ : Union[str, Any] =None
if self.use_input_mask:
__magic_name__ : str =random_attention_mask([self.batch_size, self.text_seq_length] )
__magic_name__ : Optional[Any] =None
if self.use_token_type_ids:
__magic_name__ : Dict =ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
__magic_name__ : int =None
__magic_name__ : List[str] =None
if self.use_labels:
__magic_name__ : List[str] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : List[Any] =ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
__magic_name__ : int =LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def A__ ( self :Tuple , __snake_case :Optional[Any] , __snake_case :int , __snake_case :Union[str, Any] , __snake_case :Union[str, Any] , __snake_case :int , __snake_case :Dict , __snake_case :List[Any] , __snake_case :Tuple ):
'''simple docstring'''
__magic_name__ : Any =LayoutLMvaModel(config=__snake_case )
model.to(__snake_case )
model.eval()
# text + image
__magic_name__ : List[Any] =model(__snake_case , pixel_values=__snake_case )
__magic_name__ : str =model(
__snake_case , bbox=__snake_case , pixel_values=__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case )
__magic_name__ : int =model(__snake_case , bbox=__snake_case , pixel_values=__snake_case , token_type_ids=__snake_case )
__magic_name__ : Optional[int] =model(__snake_case , bbox=__snake_case , pixel_values=__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
__magic_name__ : int =model(__snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
__magic_name__ : Union[str, Any] =model(pixel_values=__snake_case )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def A__ ( self :Union[str, Any] , __snake_case :Union[str, Any] , __snake_case :Union[str, Any] , __snake_case :Tuple , __snake_case :Tuple , __snake_case :List[str] , __snake_case :Tuple , __snake_case :str , __snake_case :str ):
'''simple docstring'''
__magic_name__ : List[Any] =self.num_labels
__magic_name__ : List[Any] =LayoutLMvaForSequenceClassification(__snake_case )
model.to(__snake_case )
model.eval()
__magic_name__ : Dict =model(
__snake_case , bbox=__snake_case , pixel_values=__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self :Tuple , __snake_case :Any , __snake_case :Optional[Any] , __snake_case :Union[str, Any] , __snake_case :Dict , __snake_case :Any , __snake_case :List[Any] , __snake_case :List[str] , __snake_case :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : Union[str, Any] =self.num_labels
__magic_name__ : List[Any] =LayoutLMvaForTokenClassification(config=__snake_case )
model.to(__snake_case )
model.eval()
__magic_name__ : List[Any] =model(
__snake_case , bbox=__snake_case , pixel_values=__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , labels=__snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def A__ ( self :Any , __snake_case :Union[str, Any] , __snake_case :Optional[int] , __snake_case :int , __snake_case :Union[str, Any] , __snake_case :Tuple , __snake_case :Optional[Any] , __snake_case :Optional[int] , __snake_case :Optional[Any] ):
'''simple docstring'''
__magic_name__ : str =LayoutLMvaForQuestionAnswering(config=__snake_case )
model.to(__snake_case )
model.eval()
__magic_name__ : List[Any] =model(
__snake_case , bbox=__snake_case , pixel_values=__snake_case , attention_mask=__snake_case , token_type_ids=__snake_case , start_positions=__snake_case , end_positions=__snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : str =self.prepare_config_and_inputs()
        (
            __magic_name__ ,
            __magic_name__ ,
            __magic_name__ ,
            __magic_name__ ,
            __magic_name__ ,
            __magic_name__ ,
            __magic_name__ ,
            __magic_name__ ,
        ) = config_and_inputs
__magic_name__ : Optional[Any] ={
"""input_ids""": input_ids,
"""bbox""": bbox,
"""pixel_values""": pixel_values,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class __A ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = (
(
LayoutLMvaModel,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaForQuestionAnswering,
)
if is_torch_available()
else ()
)
UpperCamelCase = (
{"""document-question-answering""": LayoutLMvaForQuestionAnswering, """feature-extraction""": LayoutLMvaModel}
if is_torch_available()
else {}
)
def A__ ( self :str , __snake_case :Tuple , __snake_case :Optional[Any] , __snake_case :Optional[int] , __snake_case :Optional[int] , __snake_case :Dict ):
'''simple docstring'''
return True
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : Dict =LayoutLMvaModelTester(self )
__magic_name__ : List[str] =ConfigTester(self , config_class=__snake_case , hidden_size=37 )
def A__ ( self :List[Any] , __snake_case :int , __snake_case :Tuple , __snake_case :Tuple=False ):
'''simple docstring'''
__magic_name__ : str =copy.deepcopy(__snake_case )
if model_class in get_values(__snake_case ):
__magic_name__ : Optional[Any] ={
k: v.unsqueeze(1 ).expand(-1 , self.model_tester.num_choices , -1 ).contiguous()
if isinstance(__snake_case , torch.Tensor ) and v.ndim > 1
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(__snake_case ):
__magic_name__ : Dict =torch.ones(self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
elif model_class in get_values(__snake_case ):
__magic_name__ : Dict =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
__magic_name__ : List[str] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
elif model_class in [
*get_values(__snake_case ),
]:
__magic_name__ : int =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__snake_case )
elif model_class in [
*get_values(__snake_case ),
]:
__magic_name__ : Optional[int] =torch.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=__snake_case , )
return inputs_dict
def A__ ( self :Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def A__ ( self :Optional[Any] ):
'''simple docstring'''
__magic_name__ : List[Any] =self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__magic_name__ : Union[str, Any] =type
self.model_tester.create_and_check_model(*__snake_case )
def A__ ( self :List[Any] ):
'''simple docstring'''
__magic_name__ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__snake_case )
def A__ ( self :Union[str, Any] ):
'''simple docstring'''
__magic_name__ : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__snake_case )
def A__ ( self :int ):
'''simple docstring'''
__magic_name__ : List[str] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__snake_case )
@slow
def A__ ( self :Dict ):
'''simple docstring'''
for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : int =LayoutLMvaModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def lowerCAmelCase_ ( ):
__magic_name__ : List[str] =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
class __A ( unittest.TestCase ):
@cached_property
def A__ ( self :str ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=__snake_case ) if is_vision_available() else None
@slow
def A__ ( self :Tuple ):
'''simple docstring'''
__magic_name__ : List[Any] =LayoutLMvaModel.from_pretrained("""microsoft/layoutlmv3-base""" ).to(__snake_case )
__magic_name__ : Optional[int] =self.default_image_processor
__magic_name__ : List[Any] =prepare_img()
__magic_name__ : Dict =image_processor(images=__snake_case , return_tensors="""pt""" ).pixel_values.to(__snake_case )
__magic_name__ : Any =torch.tensor([[1, 2]] )
__magic_name__ : List[str] =torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]] ).unsqueeze(0 )
# forward pass
__magic_name__ : Tuple =model(
input_ids=input_ids.to(__snake_case ) , bbox=bbox.to(__snake_case ) , pixel_values=pixel_values.to(__snake_case ) , )
# verify the logits
__magic_name__ : Dict =torch.Size((1, 1_99, 7_68) )
self.assertEqual(outputs.last_hidden_state.shape , __snake_case )
__magic_name__ : List[str] =torch.tensor(
[[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]] ).to(__snake_case )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , __snake_case , atol=1E-4 ) )
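# --- Hedged aside (a vectorized stand-in for the per-element loop in
# prepare_config_and_inputs above): boxes are (x0, y0, x1, y1) and are made
# "legal" by swapping coordinates so that x0 <= x1 and y0 <= y1.
import torch

_bbox = torch.randint(0, 1000, (2, 5, 4))
_xa = torch.minimum(_bbox[..., 0], _bbox[..., 2])
_ya = torch.minimum(_bbox[..., 1], _bbox[..., 3])
_xb = torch.maximum(_bbox[..., 0], _bbox[..., 2])
_yb = torch.maximum(_bbox[..., 1], _bbox[..., 3])
_legal = torch.stack([_xa, _ya, _xb, _yb], dim=-1)
assert (_legal[..., 2] >= _legal[..., 0]).all() and (_legal[..., 3] >= _legal[..., 1]).all()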
| 21 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
lowercase : List[str] = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
lowercase : str = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
lowercase : Optional[int] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def __a ( A__ ) -> Optional[Any]:
with open(A__ , "rb" ) as f:
lowerCAmelCase = Image.open(A__ )
return im.convert("RGB" )
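# --- Hedged illustration of the loader above (synthetic image, PIL only):
# converting to "RGB" normalizes grayscale/RGBA inputs to 3 channels before
# the torchvision transforms run.
from PIL import Image

_demo_img = Image.new("RGBA", (8, 8), (255, 0, 0, 128))
_demo_rgb = _demo_img.convert("RGB")
print(_demo_rgb.mode, _demo_rgb.size)  # RGB (8, 8)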
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowerCAmelCase = field(
default=UpperCamelCase_ , metadata={
'help': 'Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub).'
} , )
lowerCAmelCase = field(
default=UpperCamelCase_ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
lowerCAmelCase = field(default=UpperCamelCase_ , metadata={'help': 'A folder containing the training data.'} )
lowerCAmelCase = field(default=UpperCamelCase_ , metadata={'help': 'A folder containing the validation data.'} )
lowerCAmelCase = field(
default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} )
lowerCAmelCase = field(
default=UpperCamelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
lowerCAmelCase = field(
default=UpperCamelCase_ , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def __A ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
"You must specify either a dataset name from the hub or a train and/or validation directory." )
@dataclass
class _lowerCAmelCase :
"""simple docstring"""
lowerCAmelCase = field(
default='google/vit-base-patch16-224-in21k' , metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} , )
lowerCAmelCase = field(
default=UpperCamelCase_ , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(UpperCamelCase_ )} , )
lowerCAmelCase = field(
default=UpperCamelCase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCAmelCase = field(
default=UpperCamelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} )
lowerCAmelCase = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
lowerCAmelCase = field(default=UpperCamelCase_ , metadata={'help': 'Name or path of preprocessor config.'} )
lowerCAmelCase = field(
default=UpperCamelCase_ , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
lowerCAmelCase = field(
default=UpperCamelCase_ , metadata={'help': 'Will enable to load a pretrained model whose head dimensions are different.'} , )
def __a ( A__ ) -> Any:
lowerCAmelCase = torch.stack([example["pixel_values"] for example in examples] )
lowerCAmelCase = torch.tensor([example["labels"] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def __a ( ) -> Tuple:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_image_classification" , A__ , A__ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
lowerCAmelCase = training_args.get_process_log_level()
logger.setLevel(A__ )
transformers.utils.logging.set_verbosity(A__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" )
logger.info(f"Training/evaluation parameters {training_args}" )
# Detecting last checkpoint.
lowerCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
lowerCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
lowerCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="image-classification" , use_auth_token=True if model_args.use_auth_token else None , )
else:
lowerCAmelCase = {}
if data_args.train_dir is not None:
lowerCAmelCase = os.path.join(data_args.train_dir , "**" )
if data_args.validation_dir is not None:
lowerCAmelCase = os.path.join(data_args.validation_dir , "**" )
lowerCAmelCase = load_dataset(
"imagefolder" , data_files=A__ , cache_dir=model_args.cache_dir , task="image-classification" , )
# If we don't have a validation split, split off a percentage of train as validation.
lowerCAmelCase = None if "validation" in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , A__ ) and data_args.train_val_split > 0.0:
lowerCAmelCase = dataset["train"].train_test_split(data_args.train_val_split )
lowerCAmelCase = split["train"]
lowerCAmelCase = split["test"]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
lowerCAmelCase = dataset["train"].features["labels"].names
lowerCAmelCase , lowerCAmelCase = {}, {}
for i, label in enumerate(A__ ):
lowerCAmelCase = str(A__ )
lowerCAmelCase = label
# Load the accuracy metric from the datasets package
lowerCAmelCase = evaluate.load("accuracy" )
    # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
    # `predictions` and `label_ids` fields) and has to return a dictionary mapping metric names to float values.
def compute_metrics(A__ ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
lowerCAmelCase = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(A__ ) , labelaid=A__ , idalabel=A__ , finetuning_task="image-classification" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
lowerCAmelCase = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=A__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
lowerCAmelCase = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
lowerCAmelCase = image_processor.size["shortest_edge"]
else:
lowerCAmelCase = (image_processor.size["height"], image_processor.size["width"])
lowerCAmelCase = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
lowerCAmelCase = Compose(
[
RandomResizedCrop(A__ ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
lowerCAmelCase = Compose(
[
Resize(A__ ),
CenterCrop(A__ ),
ToTensor(),
normalize,
] )
def train_transforms(A__ ):
lowerCAmelCase = [
_train_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]
]
return example_batch
def val_transforms(A__ ):
lowerCAmelCase = [_val_transforms(pil_img.convert("RGB" ) ) for pil_img in example_batch["image"]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
lowerCAmelCase = (
dataset["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(A__ )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
lowerCAmelCase = (
dataset["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(A__ )
    # Initialize our trainer
lowerCAmelCase = Trainer(
model=A__ , args=A__ , train_dataset=dataset["train"] if training_args.do_train else None , eval_dataset=dataset["validation"] if training_args.do_eval else None , compute_metrics=A__ , tokenizer=A__ , data_collator=A__ , )
# Training
if training_args.do_train:
lowerCAmelCase = None
if training_args.resume_from_checkpoint is not None:
lowerCAmelCase = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
lowerCAmelCase = last_checkpoint
lowerCAmelCase = trainer.train(resume_from_checkpoint=A__ )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
lowerCAmelCase = trainer.evaluate()
trainer.log_metrics("eval" , A__ )
trainer.save_metrics("eval" , A__ )
# Write model card and (optionally) push to hub
lowerCAmelCase = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "image-classification",
"dataset": data_args.dataset_name,
"tags": ["image-classification", "vision"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**A__ )
else:
trainer.create_model_card(**A__ )
if __name__ == "__main__":
main()
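# --- Hedged stand-alone version of the compute_metrics logic defined inside
# main() above: Trainer passes raw logits, and accuracy is argmax over the
# class axis compared with the label ids (toy numbers below).
import numpy as np

_logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
_labels = np.array([1, 0, 0])
_preds = np.argmax(_logits, axis=1)
print(f"accuracy = {float((_preds == _labels).mean()):.3f}")  # accuracy = 0.667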
| 649 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class _UpperCAmelCase ( lowercase_ , lowercase_ , unittest.TestCase ):
UpperCamelCase = StableDiffusionSAGPipeline
UpperCamelCase = TEXT_TO_IMAGE_PARAMS
UpperCamelCase = TEXT_TO_IMAGE_BATCH_PARAMS
UpperCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
UpperCamelCase = False
def lowerCamelCase ( self :Optional[int] ):
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
A = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=__UpperCamelCase , set_alpha_to_one=__UpperCamelCase , )
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
A = CLIPTextModel(__UpperCamelCase )
A = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
A = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def lowerCamelCase ( self :str , __UpperCamelCase :Optional[int] , __UpperCamelCase :Union[str, Any]=0 ):
if str(__UpperCamelCase ).startswith("mps" ):
A = torch.manual_seed(__UpperCamelCase )
else:
A = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
A = {
"prompt": ".",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 1.0,
"sag_scale": 1.0,
"output_type": "numpy",
}
return inputs
def lowerCamelCase ( self :List[Any] ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self :Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase ( self :Union[str, Any] ):
A = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
A = sag_pipe.to(__UpperCamelCase )
sag_pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = "."
A = torch.manual_seed(0 )
A = sag_pipe(
[prompt] , generator=__UpperCamelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" )
A = output.images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
A = np.array([0.1_568, 0.1_738, 0.1_695, 0.1_693, 0.1_507, 0.1_705, 0.1_547, 0.1_751, 0.1_949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def lowerCamelCase ( self :Tuple ):
A = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
A = sag_pipe.to(__UpperCamelCase )
sag_pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = "."
A = torch.manual_seed(0 )
A = sag_pipe(
[prompt] , generator=__UpperCamelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" )
A = output.images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_12, 5_12, 3)
A = np.array([0.3_459, 0.2_876, 0.2_537, 0.3_002, 0.2_671, 0.2_160, 0.3_026, 0.2_262, 0.2_371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def lowerCamelCase ( self :Dict ):
A = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
A = sag_pipe.to(__UpperCamelCase )
sag_pipe.set_progress_bar_config(disable=__UpperCamelCase )
A = "."
A = torch.manual_seed(0 )
A = sag_pipe(
[prompt] , width=7_68 , height=5_12 , generator=__UpperCamelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" , )
A = output.images
assert image.shape == (1, 5_12, 7_68, 3)
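# --- Hedged sketch of the slice-comparison assertion pattern used by the slow
# tests above: a fixed 3x3 corner of the generated image is compared against a
# stored reference slice with a loose tolerance (fake data here).
import numpy as np

_image = np.random.RandomState(0).rand(1, 512, 512, 3)
_image_slice = _image[0, -3:, -3:, -1]
_expected_slice = _image_slice.flatten().copy()  # pretend-reference values
assert np.abs(_image_slice.flatten() - _expected_slice).max() < 5e-2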
| 524 |
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
_snake_case : str = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, 'utils'))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
_snake_case : Optional[int] = ' \"""\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"""\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n'
class _UpperCAmelCase ( unittest.TestCase ):
def lowerCamelCase ( self :List[Any] ):
A = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , "schedulers/" ) )
A = self.diffusers_dir
shutil.copy(
os.path.join(__UpperCamelCase , "src/diffusers/schedulers/scheduling_ddpm.py" ) , os.path.join(self.diffusers_dir , "schedulers/scheduling_ddpm.py" ) , )
def lowerCamelCase ( self :Optional[int] ):
A = "src/diffusers"
shutil.rmtree(self.diffusers_dir )
def lowerCamelCase ( self :List[Any] , __UpperCamelCase :Tuple , __UpperCamelCase :Optional[Any] , __UpperCamelCase :Any , __UpperCamelCase :Optional[Any]=None ):
A = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
if overwrite_result is not None:
A = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
A = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 )
A = black.format_str(__UpperCamelCase , mode=__UpperCamelCase )
A = os.path.join(self.diffusers_dir , "new_code.py" )
with open(__UpperCamelCase , "w" , newline="\n" ) as f:
f.write(__UpperCamelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__UpperCamelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=__UpperCamelCase )
with open(__UpperCamelCase , "r" ) as f:
                self.assertEqual(f.read() , __UpperCamelCase )
def lowerCamelCase ( self :Tuple ):
A = check_copies.find_code_in_diffusers("schedulers.scheduling_ddpm.DDPMSchedulerOutput" )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def lowerCamelCase ( self :Union[str, Any] ):
# Base copy consistency
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , REFERENCE_CODE + "\n" , )
# With no empty line at the end
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput" , "DDPMSchedulerOutput" , __UpperCamelCase , )
# Copy consistency with rename
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , re.sub("DDPM" , "Test" , __UpperCamelCase ) , )
# Copy consistency with a really long name
A = "TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
self.check_copy_consistency(
f"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}" , f"{long_class_name}SchedulerOutput" , re.sub("Bert" , __UpperCamelCase , __UpperCamelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test" , "TestSchedulerOutput" , __UpperCamelCase , overwrite_result=re.sub("DDPM" , "Test" , __UpperCamelCase ) , )
| 524 | 1 |
"""simple docstring"""
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
_A = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer):
    mode = "sequence-classification"

    def __init__(self, hparams):
        if type(hparams) == dict:
            hparams = Namespace(**hparams)
        hparams.glue_output_mode = glue_output_modes[hparams.task]
        num_labels = glue_tasks_num_labels[hparams.task]

        super().__init__(hparams, num_labels, self.mode)

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        loss = outputs[0]

        lr_scheduler = self.trainer.lr_schedulers[0]["scheduler"]
        tensorboard_logs = {"loss": loss, "rate": lr_scheduler.get_last_lr()[-1]}
        return {"loss": loss, "log": tensorboard_logs}

    def prepare_data(self):
        "Called to initialize data. Use the call to construct features"
        args = self.hparams
        processor = processors[args.task]()
        self.labels = processor.get_labels()

        for mode in ["train", "dev"]:
            cached_features_file = self._feature_file(mode)
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                logger.info("Loading features from cached file %s", cached_features_file)
            else:
                logger.info("Creating features from dataset file at %s", args.data_dir)
                examples = (
                    processor.get_dev_examples(args.data_dir)
                    if mode == "dev"
                    else processor.get_train_examples(args.data_dir)
                )
                features = convert_examples_to_features(
                    examples,
                    self.tokenizer,
                    max_length=args.max_seq_length,
                    label_list=self.labels,
                    output_mode=args.glue_output_mode,
                )
                logger.info("Saving features into cached file %s", cached_features_file)
                torch.save(features, cached_features_file)

    def get_dataloader(self, mode: str, batch_size: int, shuffle: bool = False) -> DataLoader:
        "Load datasets. Called after prepare data."

        # We test on dev set to compare to benchmarks without having to submit to GLUE server
        mode = "dev" if mode == "test" else mode

        cached_features_file = self._feature_file(mode)
        logger.info("Loading features from cached file %s", cached_features_file)
        features = torch.load(cached_features_file)
        all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
        all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
        all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
        if self.hparams.glue_output_mode == "classification":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.long)
        elif self.hparams.glue_output_mode == "regression":
            all_labels = torch.tensor([f.label for f in features], dtype=torch.float)

        return DataLoader(
            TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels),
            batch_size=batch_size,
            shuffle=shuffle,
        )

    def validation_step(self, batch, batch_idx):
        inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}

        if self.config.model_type not in ["distilbert", "bart"]:
            inputs["token_type_ids"] = batch[2] if self.config.model_type in ["bert", "xlnet", "albert"] else None

        outputs = self(**inputs)
        tmp_eval_loss, logits = outputs[:2]
        preds = logits.detach().cpu().numpy()
        out_label_ids = inputs["labels"].detach().cpu().numpy()

        return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}

    def _eval_end(self, outputs) -> tuple:
        val_loss_mean = torch.stack([x["val_loss"] for x in outputs]).mean().detach().cpu().item()
        preds = np.concatenate([x["pred"] for x in outputs], axis=0)

        if self.hparams.glue_output_mode == "classification":
            preds = np.argmax(preds, axis=1)
        elif self.hparams.glue_output_mode == "regression":
            preds = np.squeeze(preds)

        out_label_ids = np.concatenate([x["target"] for x in outputs], axis=0)
        out_label_list = [[] for _ in range(out_label_ids.shape[0])]
        preds_list = [[] for _ in range(out_label_ids.shape[0])]

        results = {**{"val_loss": val_loss_mean}, **compute_metrics(self.hparams.task, preds, out_label_ids)}

        ret = dict(results.items())
        ret["log"] = results
        return ret, preds_list, out_label_list

    def validation_epoch_end(self, outputs: list) -> dict:
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
        return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    def test_epoch_end(self, outputs) -> dict:
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
        # `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
        return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}

    @staticmethod
    def add_model_specific_args(parser, root_dir):
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument(
            "--task",
            default="",
            type=str,
            required=True,
            help="The GLUE task to run",
        )
        parser.add_argument(
            "--gpus",
            default=0,
            type=int,
            help="The number of GPUs allocated for this, it is by default 0 meaning none",
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )

        return parser


def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)

    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
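# Example invocation (sketch; the script name, paths and checkpoint are illustrative).
# `--data_dir`, `--model_name_or_path`, `--output_dir`, `--do_train` and `--do_predict`
# are assumed to be added by `add_generic_args`/`BaseTransformer` from `lightning_base`:
#
#   python run_glue.py --task mrpc --data_dir ./glue_data/MRPC \
#       --model_name_or_path bert-base-cased --output_dir ./results/mrpc \
#       --max_seq_length 128 --gpus 1 --do_train --do_predict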
| 299 |
'''simple docstring'''
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
"""
Project Euler Problem 92: https://projecteuler.net/problem=92

Square digit chains: every starting number eventually arrives at 1 or 89.
How many starting numbers below ten million will arrive at 89?
"""
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]
def next_number(number: int) -> int:
    """
    Returns the next number of the chain: the sum of the squares of the digits.
    """
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100000]
        number //= 100000

    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # the chain of 1 ends at 1
CHAINS[57] = False  # the chain of 58 ends at 89
def chain(number: int) -> bool:
    """
    Returns True if the chain of `number` ends at 1, False if it ends at 89.
    """
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain
def solution(number: int = 10_000_000) -> int:
    """
    Returns how many starting numbers below `number` arrive at 89.
    """
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
| 339 |
"""
Project Euler Problem 87: https://projecteuler.net/problem=87

How many numbers below fifty million can be expressed as the sum of a prime
square, a prime cube, and a prime fourth power?
"""


def solution(limit: int = 50_000_000) -> int:
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))

    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))

    for prime1 in primes:
        square = prime1 * prime1
        for prime2 in primes:
            cube = prime2 * prime2 * prime2
            if square + cube >= limit - 16:
                break
            for prime3 in primes:
                tetr = prime3 * prime3 * prime3 * prime3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)

    return len(ret)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 339 | 1 |
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """
    Multiplication only for 2x2 matrices.
    """
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception("Matrices are not 2x2")
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix


def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]


def split_matrix(a: list) -> tuple[list, list, list, list]:
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception("Odd matrices are not supported!")

    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]

    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]

    return top_left, top_right, bot_left, bot_right


def matrix_dimensions(matrix: list) -> tuple[int, int]:
    return len(matrix), len(matrix[0])


def print_matrix(matrix: list) -> None:
    print("\n".join(str(line) for line in matrix))


def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(top_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix


def strassen(matrix1: list, matrix2: list) -> list:
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            "Unable to multiply these matrices, please check the dimensions.\n"
            f"Matrix A: {matrix1}\n"
            f"Matrix B: {matrix2}"
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    if dimension1[0] == dimension1[1] and dimension2[0] == dimension2[1]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix


if __name__ == "__main__":
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
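    # Cross-check the base case against an identity multiplication (minimal sketch):
    identity = [[1, 0], [0, 1]]
    sample = [[5, 6], [7, 8]]
    assert default_matrix_multiplication(identity, sample) == sample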
| 598 |
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, input_modal):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(input_modal))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length

        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
'''simple docstring'''
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
'''simple docstring'''
return transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] ,std=[0.12_221_994, 0.12_145_835, 0.14_380_469] ,),
] )
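# Minimal wiring sketch (paths and the tokenizer checkpoint are hypothetical; assumes
# MM-IMDB style jsonl rows with "text", "img" and "label" fields):
#
#   from torch.utils.data import DataLoader
#   from transformers import AutoTokenizer
#
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#   labels = get_mmimdb_labels()
#   train_set = JsonlDataset("data/train.jsonl", tokenizer, get_image_transforms(), labels, max_seq_length=128)
#   train_loader = DataLoader(train_set, batch_size=8, collate_fn=collate_fn)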
| 267 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_distilbert''': [
'''DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''DistilBertConfig''',
'''DistilBertOnnxConfig''',
],
'''tokenization_distilbert''': ['''DistilBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_distilbert"] = [
'''DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DistilBertForMaskedLM''',
'''DistilBertForMultipleChoice''',
'''DistilBertForQuestionAnswering''',
'''DistilBertForSequenceClassification''',
'''DistilBertForTokenClassification''',
'''DistilBertModel''',
'''DistilBertPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_distilbert"] = [
'''TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDistilBertForMaskedLM''',
'''TFDistilBertForMultipleChoice''',
'''TFDistilBertForQuestionAnswering''',
'''TFDistilBertForSequenceClassification''',
'''TFDistilBertForTokenClassification''',
'''TFDistilBertMainLayer''',
'''TFDistilBertModel''',
'''TFDistilBertPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_distilbert"] = [
'''FlaxDistilBertForMaskedLM''',
'''FlaxDistilBertForMultipleChoice''',
'''FlaxDistilBertForQuestionAnswering''',
'''FlaxDistilBertForSequenceClassification''',
'''FlaxDistilBertForTokenClassification''',
'''FlaxDistilBertModel''',
'''FlaxDistilBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
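# With the `_LazyModule` indirection above, importing a name from this package only
# loads the heavy backend-specific submodule on first attribute access (sketch):
#
#   from transformers import DistilBertConfig  # resolved lazily via _import_structure
#   config = DistilBertConfig(n_layers=6)      # configuration submodule loads here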
| 720 |
from __future__ import annotations
import inspect
import unittest
from typing import List, Tuple
from transformers import RegNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class TFRegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFRegNetModel(config=config)
        result = model(pixel_values, training=False)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = TFRegNetForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFRegNetModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFRegNetModel, "image-classification": TFRegNetForImageClassification}
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TFRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skipIf(
        not is_tf_available() or len(tf.config.list_physical_devices("GPU")) == 0,
        reason="TF does not support backprop for grouped convolutions on CPU.",
    )
    @slow
    def test_keras_fit(self):
        super().test_keras_fit()

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class), training=False)

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True
                check_hidden_states_output(inputs_dict, config, model_class)

    # RegNet has no attention outputs, so the common equivalence test is rewritten here.
    def test_model_outputs_equivalence(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
            tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
            dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()

            def recursive_check(tuple_object, dict_object):
                if isinstance(tuple_object, (List, Tuple)):
                    for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
                        recursive_check(tuple_iterable_value, dict_iterable_value)
                elif tuple_object is None:
                    return
                else:
                    self.assertTrue(
                        all(tf.equal(tuple_object, dict_object)),
                        msg=(
                            "Tuple and dict output are not equal. Difference:"
                            f" {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}"
                        ),
                    )

            recursive_check(tuple_output, dict_output)

        for model_class in self.all_model_classes:
            model = model_class(config)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs)

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

            tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFRegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs, training=False)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.4_180, -1.5_051, -3.4_836])

        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
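# Outside the test harness, the same integration check boils down to this sketch
# (checkpoint name illustrative, not taken from the archive list above):
#
#   model = TFRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")
#   processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")
#   inputs = processor(images=prepare_img(), return_tensors="tf")
#   logits = model(**inputs, training=False).logits  # shape (1, 1000)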
| 221 | 0 |
"""
Project Euler Problem 6: https://projecteuler.net/problem=6

Find the difference between the square of the sum and the sum of the squares
of the first `n` natural numbers.
"""


def solution(n: int = 100) -> int:
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""")
| 5 |
"""simple docstring"""
import unittest
from diffusers.models.unet_2d_blocks import *  # noqa F403
from diffusers.utils import torch_device

from .test_unet_blocks_common import UNetBlockTesterMixin


class DownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [-0.02_32, -0.98_69, 0.80_54, -0.06_37, -0.16_88, -1.42_64, 0.44_70, -1.33_94, 0.09_04]
        super().test_output(expected_slice)


class ResnetDownsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetDownsampleBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.07_10, 0.24_10, -0.73_20, -1.07_57, -1.13_43, 0.35_40, -0.01_33, -0.25_76, 0.09_48]
        super().test_output(expected_slice)


class AttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownBlock2D  # noqa F405
    block_type = "down"

    def test_output(self):
        expected_slice = [0.06_36, 0.89_64, -0.62_34, -1.01_31, 0.08_44, 0.49_35, 0.34_37, 0.09_11, -0.29_57]
        super().test_output(expected_slice)


class CrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.22_38, -0.73_96, -0.22_55, -0.38_29, 0.19_25, 1.16_65, 0.06_03, -0.72_95, 0.19_83]
        super().test_output(expected_slice)


class SimpleCrossAttnDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.79_21, -0.09_92, -0.19_62, -0.76_95, -0.42_42, 0.78_04, 0.47_37, 0.27_65, 0.33_38]
        super().test_output(expected_slice)


class SkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [-0.08_45, -0.20_87, -0.24_65, 0.09_71, 0.19_00, -0.04_84, 0.26_64, 0.41_79, 0.50_69]
        super().test_output(expected_slice)


class AttnSkipDownBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipDownBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_skip_sample=True)

    def test_output(self):
        expected_slice = [0.55_39, 0.16_09, 0.49_24, 0.05_37, -0.19_95, 0.40_50, 0.09_79, -0.27_21, -0.06_42]
        super().test_output(expected_slice)


class DownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = DownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [1.11_02, 0.53_02, 0.48_72, -0.00_23, -0.80_42, 0.04_83, -0.34_89, -0.56_32, 0.76_26]
        super().test_output(expected_slice)


class AttnDownEncoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnDownEncoderBlock2D  # noqa F405
    block_type = "down"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "out_channels": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.89_66, -0.14_86, 0.85_68, 0.81_41, -0.90_46, -0.13_42, -0.09_72, -0.74_17, 0.15_38]
        super().test_output(expected_slice)


class UNetMidBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2D  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "in_channels": 32,
            "temb_channels": 128,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.10_62, 1.72_48, 0.34_94, 1.45_69, -0.09_10, -1.24_21, -0.99_84, 0.67_36, 1.00_28]
        super().test_output(expected_slice)


class UNetMidBlock2DCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DCrossAttn  # noqa F405
    block_type = "mid"

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.01_87, 2.42_20, 0.44_84, 1.12_03, -0.61_21, -1.51_22, -0.82_70, 0.78_51, 1.83_35]
        super().test_output(expected_slice)


class UNetMidBlock2DSimpleCrossAttnTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UNetMidBlock2DSimpleCrossAttn  # noqa F405
    block_type = "mid"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.71_43, 1.99_74, 0.54_48, 1.39_77, 0.12_82, -1.12_37, -1.42_38, 0.55_30, 0.88_80]
        super().test_output(expected_slice)


class UpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.20_41, -0.41_65, -0.30_22, 0.00_41, -0.66_28, -0.70_53, 0.19_28, -0.03_25, 0.05_23]
        super().test_output(expected_slice)


class ResnetUpsampleBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = ResnetUpsampleBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.22_87, 0.35_49, -0.13_46, 0.47_97, -0.17_15, -0.96_49, 0.73_05, -0.58_64, -0.62_44]
        super().test_output(expected_slice)


class CrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = CrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [-0.14_03, -0.35_15, -0.04_20, -0.14_25, 0.31_67, 0.50_94, -0.21_81, 0.59_31, 0.55_82]
        super().test_output(expected_slice)


class SimpleCrossAttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SimpleCrossAttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True, include_encoder_hidden_states=True)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict, inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["cross_attention_dim"] = 32
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.26_45, 0.14_80, 0.09_09, 0.80_44, -0.97_58, -0.90_83, 0.09_94, -1.14_53, -0.74_02]
        super().test_output(expected_slice)


class AttnUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    @unittest.skipIf(torch_device == "mps", "MPS result is not consistent")
    def test_output(self):
        expected_slice = [0.09_79, 0.13_26, 0.00_21, 0.06_59, 0.22_49, 0.00_59, 0.11_32, 0.59_52, 0.10_33]
        super().test_output(expected_slice)


class SkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = SkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [-0.08_93, -0.12_34, -0.15_06, -0.03_32, 0.01_23, -0.02_11, 0.05_66, 0.01_43, 0.03_62]
        super().test_output(expected_slice)


class AttnSkipUpBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnSkipUpBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_res_hidden_states_tuple=True)

    def test_output(self):
        expected_slice = [0.03_61, 0.06_17, 0.27_87, -0.03_50, 0.03_42, 0.34_21, -0.08_43, 0.09_13, 0.30_15]
        super().test_output(expected_slice)


class UpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = UpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.44_04, 0.19_98, -0.98_86, -0.33_20, -0.31_28, -0.70_34, -0.69_55, -0.23_38, -0.31_37]
        super().test_output(expected_slice)


class AttnUpDecoderBlock2DTests(UNetBlockTesterMixin, unittest.TestCase):
    block_class = AttnUpDecoderBlock2D  # noqa F405
    block_type = "up"

    @property
    def dummy_input(self):
        return super().get_dummy_input(include_temb=False)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {"in_channels": 32, "out_channels": 32}
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_output(self):
        expected_slice = [0.67_38, 0.44_91, 0.10_55, 1.07_10, 0.73_16, 0.33_39, 0.33_52, 0.10_23, 0.35_68]
        super().test_output(expected_slice)
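# Sketch of what `super().test_output(expected_slice)` is assumed to do in
# `test_unet_blocks_common` (the exact slice and tolerance live in the mixin):
#
#   unet_block = self.block_class(**init_dict)
#   output = unet_block(**self.dummy_input)
#   output_slice = output[0, -1, -3:, -3:]
#   assert torch_all_close(output_slice.flatten(), torch.tensor(expected_slice), atol=5e-3)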
| 535 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict, encoder_only=False):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if encoder_only and not key.startswith("head"):
            key = "segformer.encoder." + key
        if key.startswith("backbone"):
            key = key.replace("backbone", "segformer.encoder")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "segformer.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("segformer.encoder.layer_norm") + len("segformer.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if key.startswith("head"):
            key = key.replace("head", "classifier")
        new_state_dict[key] = value

    return new_state_dict
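# One key walked through the rules above (sketch):
#   "backbone.patch_embed1.proj.weight"
#   -> "segformer.encoder.patch_embed1.proj.weight"          (backbone prefix rewrite)
#   -> "segformer.encoder.patch_embeddings.0.proj.weight"    (patch_embed{i} -> patch_embeddings.{i-1})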
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"segformer.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"segformer.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[
                config.hidden_sizes[i] :
            ]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)

    return image
@torch.no_grad()
def convert_segformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path):
    config = SegformerConfig()
    encoder_only = False

    # set attributes based on model_name
    repo_id = "huggingface/label-files"
    if "segformer" in model_name:
        size = model_name[len("segformer.") : len("segformer.") + 2]
        if "ade" in model_name:
            config.num_labels = 150
            filename = "ade20k-id2label.json"
            expected_shape = (1, 150, 128, 128)
        elif "city" in model_name:
            config.num_labels = 19
            filename = "cityscapes-id2label.json"
            expected_shape = (1, 19, 128, 128)
        else:
            raise ValueError(f"Model {model_name} not supported")
    elif "mit" in model_name:
        encoder_only = True
        size = model_name[4:6]
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
        expected_shape = (1, 1000)
    else:
        raise ValueError(f"Model {model_name} not supported")

    # set config attributes
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if size == "b0":
        pass
    elif size == "b1":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 256
    elif size == "b2":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 6, 3]
    elif size == "b3":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 4, 18, 3]
    elif size == "b4":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 8, 27, 3]
    elif size == "b5":
        config.hidden_sizes = [64, 128, 320, 512]
        config.decoder_hidden_size = 768
        config.depths = [3, 6, 40, 3]
    else:
        raise ValueError(f"Size {size} not supported")

    # load image processor (only resize + normalize)
    image_processor = SegformerImageProcessor(
        image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False
    )

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info(f"Converting model {model_name}...")

    # load original state dict
    if encoder_only:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))
    else:
        state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))["state_dict"]

    # rename keys
    state_dict = rename_keys(state_dict, encoder_only=encoder_only)
    if not encoder_only:
        del state_dict["decode_head.conv_seg.weight"]
        del state_dict["decode_head.conv_seg.bias"]

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    if encoder_only:
        config.reshape_last_stage = False
        model = SegformerForImageClassification(config)
    else:
        model = SegformerForSemanticSegmentation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    logits = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.13_91, -13.31_22, -13.95_54], [-12.87_32, -13.93_52, -14.35_63], [-12.94_38, -13.82_26, -14.25_13]],
[[-12.51_34, -13.46_86, -14.49_15], [-12.86_69, -14.43_43, -14.77_58], [-13.25_23, -14.58_19, -15.06_94]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-7.58_20, -8.72_31, -8.32_15], [-8.06_00, -10.35_29, -10.03_04], [-7.52_08, -9.41_03, -9.62_39]],
[[-12.69_18, -13.89_94, -13.71_37], [-13.31_96, -15.75_23, -15.47_89], [-12.93_43, -14.87_57, -14.96_89]],
[[-11.19_11, -11.94_21, -11.32_43], [-11.33_42, -13.68_39, -13.35_81], [-10.39_09, -12.18_32, -12.48_58]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-11.81_73, -14.38_50, -16.31_28], [-14.56_48, -16.58_04, -18.65_68], [-14.72_23, -15.73_87, -18.42_18]],
[[-15.72_90, -17.91_71, -19.44_23], [-18.31_05, -19.94_48, -21.46_61], [-17.92_96, -18.64_97, -20.79_10]],
[[-15.07_83, -17.03_36, -18.27_89], [-16.87_71, -18.68_70, -20.16_12], [-16.24_54, -17.14_26, -19.50_55]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.08_78, -10.20_81, -10.18_91], [-9.31_44, -10.79_41, -10.98_43], [-9.22_94, -10.38_55, -10.57_04]],
[[-12.23_16, -13.90_68, -13.61_02], [-12.91_61, -14.37_02, -14.32_35], [-12.52_33, -13.71_74, -13.79_32]],
[[-14.62_75, -15.24_90, -14.97_27], [-14.34_00, -15.96_87, -16.28_27], [-14.14_84, -15.40_33, -15.89_37]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
        expected_slice = torch.tensor(
[
[[-12.31_44, -13.24_47, -14.08_02], [-13.36_14, -14.58_16, -15.61_17], [-13.33_40, -14.44_33, -16.22_19]],
[[-19.27_81, -20.41_28, -20.75_06], [-20.61_53, -21.65_66, -22.09_98], [-19.98_00, -21.04_30, -22.14_94]],
[[-18.87_39, -19.78_04, -21.18_34], [-20.12_33, -21.67_65, -23.29_44], [-20.03_15, -21.26_41, -23.69_44]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
        expected_slice = torch.tensor(
[
[[-9.55_24, -12.08_35, -11.73_48], [-10.52_29, -13.64_46, -14.56_62], [-9.58_42, -12.88_51, -13.94_14]],
[[-15.34_32, -17.53_23, -17.08_18], [-16.33_30, -18.92_55, -19.21_01], [-15.13_40, -17.78_48, -18.39_71]],
[[-12.60_72, -14.94_86, -14.66_31], [-13.76_29, -17.09_07, -17.77_45], [-12.78_99, -16.16_95, -17.16_71]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.92_95, -13.40_57, -14.81_06], [-13.34_31, -14.81_79, -15.37_81], [-14.28_36, -15.59_42, -16.15_88]],
[[-11.49_06, -12.80_67, -13.65_64], [-13.11_89, -14.05_00, -14.15_43], [-13.87_48, -14.51_36, -14.87_89]],
[[0.53_74, 0.10_67, -0.47_42], [0.11_41, -0.22_55, -0.70_99], [-0.30_00, -0.59_24, -1.31_05]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-7.82_17, -9.87_67, -10.17_17], [-9.44_38, -10.90_58, -11.40_47], [-9.79_39, -12.34_95, -12.10_79]],
[[-7.15_14, -9.53_36, -10.08_60], [-9.77_76, -11.68_22, -11.84_39], [-10.14_11, -12.76_55, -12.89_72]],
[[0.30_21, 0.08_05, -0.23_10], [-0.03_28, -0.16_05, -0.27_14], [-0.14_08, -0.54_77, -0.69_76]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
        expected_slice = torch.tensor(
[
[
[-1.1_3_7_2E0_1, -1.2_7_8_7E0_1, -1.3_4_7_7E0_1],
[-1.2_5_3_6E0_1, -1.4_1_9_4E0_1, -1.4_4_0_9E0_1],
[-1.3_2_1_7E0_1, -1.4_8_8_8E0_1, -1.5_3_2_7E0_1],
],
[
[-1.4_7_9_1E0_1, -1.7_1_2_2E0_1, -1.8_2_7_7E0_1],
[-1.7_1_6_3E0_1, -1.9_1_9_2E0_1, -1.9_5_3_3E0_1],
[-1.7_8_9_7E0_1, -1.9_9_9_1E0_1, -2.0_3_1_5E0_1],
],
[
[7.6_7_2_3E-0_1, 4.1_9_2_1E-0_1, -7.7_8_7_8E-0_2],
[4.7_7_7_2E-0_1, 9.5_5_5_7E-0_3, -2.8_0_8_2E-0_1],
[3.6_0_3_2E-0_1, -2.4_8_2_6E-0_1, -5.1_1_6_8E-0_1],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
        expected_slice = torch.tensor(
[
[[-9.49_59, -11.30_87, -11.74_79], [-11.00_25, -12.65_40, -12.33_19], [-11.40_64, -13.04_87, -12.99_05]],
[[-9.89_05, -11.30_84, -12.08_54], [-11.17_26, -12.76_98, -12.95_83], [-11.59_85, -13.32_78, -14.17_74]],
[[0.22_13, 0.01_92, -0.24_66], [-0.17_31, -0.42_13, -0.48_74], [-0.31_26, -0.65_41, -1.13_89]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-13.57_48, -13.91_11, -12.65_00], [-14.35_00, -15.36_83, -14.23_28], [-14.75_32, -16.04_24, -15.60_87]],
[[-17.16_51, -15.87_25, -12.96_53], [-17.25_80, -17.37_18, -14.82_23], [-16.60_58, -16.87_83, -16.74_52]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-16.09_76, -16.48_56, -17.39_62], [-16.62_34, -19.03_42, -19.76_85], [-16.09_00, -18.06_61, -19.11_80]],
[[-18.47_50, -18.84_88, -19.50_74], [-19.40_30, -22.15_70, -22.59_77], [-19.11_91, -20.84_86, -22.37_83]],
[[-4.51_78, -5.50_37, -6.51_09], [-5.08_84, -7.21_74, -8.03_34], [-4.41_56, -5.81_17, -7.29_70]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-14.20_81, -14.47_32, -14.19_77], [-14.58_67, -16.44_23, -16.63_56], [-13.44_41, -14.96_85, -16.86_96]],
[[-14.45_76, -14.70_73, -15.04_51], [-15.08_16, -17.62_37, -17.98_73], [-14.42_13, -16.01_99, -18.59_92]],
[[-4.73_49, -4.95_88, -5.09_66], [-4.32_10, -6.93_25, -7.25_91], [-3.43_12, -4.74_84, -7.19_17]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-11.77_37, -11.95_26, -11.32_73], [-13.66_92, -14.45_74, -13.88_78], [-13.89_37, -14.69_24, -15.93_45]],
[[-14.67_06, -14.53_30, -14.13_06], [-16.15_02, -16.81_80, -16.42_69], [-16.83_38, -17.89_39, -20.17_46]],
[[1.04_91, 0.82_89, 1.03_10], [1.10_44, 0.52_19, 0.80_55], [1.08_99, 0.69_26, 0.55_90]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
        expected_slice = torch.tensor(
[
[[-12.56_41, -13.47_77, -13.06_84], [-13.95_87, -15.89_83, -16.65_57], [-13.31_09, -15.73_50, -16.31_41]],
[[-14.70_74, -15.43_52, -14.59_44], [-16.63_53, -18.16_63, -18.61_20], [-15.17_02, -18.03_29, -18.15_47]],
[[-1.79_90, -2.09_51, -1.77_84], [-2.63_97, -3.82_45, -3.96_86], [-1.52_64, -2.81_26, -2.93_16]],
] )
    else:
        predicted_class_idx = logits.argmax(-1 ).item()
        print("Predicted class:" , model.config.id2label[predicted_class_idx] )
    # verify logits
    if not encoder_only:
        assert logits.shape == expected_shape
        assert torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1E-2 )
    # finally, save model and image processor
    logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    model.save_pretrained(pytorch_dump_folder_path )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='segformer.b0.512x512.ade.160k',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
    args = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
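# Usage sketch (the script and checkpoint file names below are illustrative
# placeholders, not taken from this file):
#   python convert_segformer_original_to_pytorch.py \
#       --model_name segformer.b0.512x512.ade.160k \
#       --checkpoint_path ./segformer.b0.512x512.ade.160k.pth \
#       --pytorch_dump_folder_path ./segformer-b0-converted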
| 521 |
'''simple docstring'''
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Stub standing in for PIL.Image when vision support is unavailable."""
        @staticmethod
        def open( *args , **kwargs ):
            pass


def hashimage( image: Image ) -> str:
    m = hashlib.md5(image.tobytes() )
    return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
    def get_test_pipeline( self , model , tokenizer , processor ):
        depth_estimator = DepthEstimationPipeline(model=model , image_processor=processor )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def run_pipeline_test( self , depth_estimator , examples ):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        self.assertEqual({"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )} , outputs )
import datasets
        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" )
        outputs = depth_estimator(
[
Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"http://images.cocodataset.org/val2017/000000039769.jpg",
# RGBA
dataset[0]["file"],
# LA
dataset[1]["file"],
# L
dataset[2]["file"],
] )
self.assertEqual(
[
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
{"predicted_depth": ANY(torch.Tensor ), "depth": ANY(Image.Image )},
            ] , outputs , )
@require_tf
@unittest.skip("Depth estimation is not implemented in TF" )
    def test_small_model_tf( self ):
pass
@slow
@require_torch
    def test_large_model_pt( self ):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation" , model=model_id )
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg" )
        outputs["depth"] = hashimage(outputs["depth"] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item() ) , 2.662 )
@require_torch
    def test_small_model_pt( self ):
# This is highly irregular to have no small tests.
self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT" )
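# For reference, a minimal sketch of the pipeline exercised above (requires hub
# access; the model id matches test_large_model_pt):
#   from transformers import pipeline
#   depth = pipeline("depth-estimation", model="Intel/dpt-large")
#   out = depth("http://images.cocodataset.org/val2017/000000039769.jpg")
#   # out["depth"] is a PIL.Image.Image, out["predicted_depth"] a torch.Tensor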
| 521 | 1 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__( self , a=2 , b=3 , length=64 , seed=None ):
        rng = np.random.default_rng(seed )
        self.length = length
        self.x = rng.normal(size=(length,) ).astype(np.float32 )
        self.y = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.float32 )

    def __len__( self ):
        return self.length

    def __getitem__( self , i ):
        return {"x": self.x[i], "y": self.y[i]}


class RegressionModel4XPU(torch.nn.Module ):
    def __init__( self , a=0 , b=0 , double_output=False ):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.b = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
        self.first_batch = True

    def forward( self , x=None ):
        if self.first_batch:
            print(F'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
            self.first_batch = False
        return x * self.a[0] + self.b[0]


class RegressionModel(torch.nn.Module ):
    def __init__( self , a=0 , b=0 , double_output=False ):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a ).float() )
        self.b = torch.nn.Parameter(torch.tensor(b ).float() )
        self.first_batch = True

    def forward( self , x=None ):
        if self.first_batch:
            print(F'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
            self.first_batch = False
        return x * self.a + self.b


def get_dataloaders( accelerator , batch_size=16 ):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased' )
    data_files = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
    datasets = load_dataset('csv' , data_files=data_files )
    label_list = datasets['train'].unique('label' )
    label_to_id = {v: i for i, v in enumerate(label_list )}

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None , padding='max_length' )
        if "label" in examples:
            outputs['labels'] = [label_to_id[l] for l in examples['label']]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function , batched=True , remove_columns=['sentence1', 'sentence2', 'label'] , )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples , padding='max_length' , max_length=128 , return_tensors='pt' )
        return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets['train'] , shuffle=True , collate_fn=collate_fn , batch_size=2 )
    eval_dataloader = DataLoader(tokenized_datasets['validation'] , shuffle=False , collate_fn=collate_fn , batch_size=1 )
    return train_dataloader, eval_dataloader
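# Usage sketch (assumes the `accelerate` package is installed; names as
# restored above):
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   train_dl, eval_dl = get_dataloaders(accelerator)
#   model, train_dl, eval_dl = accelerator.prepare(RegressionModel(a=1, b=2), train_dl, eval_dl)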
| 105 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bloom_fast"] = ["BloomTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
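# The `_LazyModule` pattern above defers importing the heavy torch-backed
# submodules: names listed in `_import_structure` are only imported the first
# time an attribute is accessed on the module.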
| 105 | 1 |
"""simple docstring"""
def _print_dist( dist , v ):
    print('\nThe shortest path matrix using Floyd Warshall algorithm\n' )
    for i in range(v ):
        for j in range(v ):
            if dist[i][j] != float('inf' ):
                print(int(dist[i][j] ) , end='\t' )
            else:
                print('INF' , end='\t' )
        print()


def floyd_warshall( graph , v ):
    dist = [[float('inf' ) for _ in range(v )] for _ in range(v )]
    for i in range(v ):
        for j in range(v ):
            dist[i][j] = graph[i][j]
    # check vertex k against all other vertices (i, j)
    for k in range(v ):
        # looping through rows of graph array
        for i in range(v ):
            # looping through columns of graph array
            for j in range(v ):
                if (
                    dist[i][k] != float('inf' )
                    and dist[k][j] != float('inf' )
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]
    _print_dist(dist , v )
    return dist, v
if __name__ == "__main__":
    v = int(input('Enter number of vertices: '))
    e = int(input('Enter number of edges: '))

    graph = [[float('inf') for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print('\nEdge ', i + 1)
        src = int(input('Enter source:'))
        dst = int(input('Enter destination:'))
        weight = float(input('Enter weight:'))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
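# Floyd-Warshall runs in O(v^3) time and O(v^2) extra space. Unlike Dijkstra's
# algorithm it handles negative edge weights, provided the graph contains no
# negative cycle.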
| 707 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """A weighted, directed graph edge."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list restricted to edge weights 0 and 1."""

    def __init__( self , size: int ):
        self._graph: list[list[Edge]] = [[] for _ in range(size )]
        self._size = size

    def __getitem__( self , vertex: int ) -> Iterator[Edge]:
        return iter(self._graph[vertex] )

    @property
    def size( self ):
        return self._size

    def add_edge( self , from_vertex: int , to_vertex: int , weight: int ):
        if weight not in (0, 1):
            raise ValueError('Edge weight must be either 0 or 1.' )
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError('Vertex indexes must be in [0; size).' )
        self._graph[from_vertex].append(Edge(to_vertex , weight ) )

    def get_shortest_path( self , start_vertex: int , finish_vertex: int ) -> int:
        queue = deque([start_vertex] )
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance , int )
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex )
                else:
                    queue.append(edge.destination_vertex )

        if distances[finish_vertex] is None:
            raise ValueError('No path from start_vertex to finish_vertex.' )
        return distances[finish_vertex]
if __name__ == "__main__":
import doctest
doctest.testmod()
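# Why a plain deque suffices (0-1 BFS): relaxing a weight-0 edge does not change
# the distance, so that vertex is pushed to the *front* of the queue, while
# weight-1 edges are pushed to the back. The queue therefore stays sorted by
# distance, giving O(V + E) overall versus O((V + E) log V) for Dijkstra with a heap.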
| 254 | 0 |
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1


def casimir_force(force: float , area: float , distance: float ) -> dict[str, float]:
"""simple docstring"""
if (force, area, distance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if force < 0:
raise ValueError('Magnitude of force can not be negative' )
if distance < 0:
raise ValueError('Distance can not be negative' )
if area < 0:
raise ValueError('Area can not be negative' )
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}

    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}

    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError('One and only one argument must be 0' )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
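# Worked example (illustrative values): with force=0, area=4 m^2 and
# distance=0.05 m, the function solves F = (pi^2 * hbar * c * A) / (240 * d^4)
# for the force; passing exactly one zero argument selects which quantity is solved for.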
| 246 |
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
_CITATION = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
_DESCRIPTION = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
_KWARGS_DESCRIPTION = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n    predictions: list of candidates to evaluate. Each candidates should be a list\n        of strings with several code candidates to solve the problem.\n    references: a list with a test for each prediction. Each test should evaluate the\n        correctness of a code candidate.\n    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n    num_workers: number of workers used to evaluate the candidate programs (Default: 4).\n    timeout:\nReturns:\n    pass_at_k: dict with pass rates for each k\n    results: dict with granular results of each unittest\nExamples:\n    >>> code_eval = datasets.load_metric("code_eval")\n    >>> test_cases = ["assert add(2,3)==5"]\n    >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n    >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n    >>> print(pass_at_k)\n    {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
_WARNING = '\n################################################################################\n                                  !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
_LICENSE = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CodeEval(datasets.Metric ):
'''simple docstring'''
    def _info( self ):
'''simple docstring'''
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' ) ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/openai/human-eval' , codebase_urls=['https://github.com/openai/human-eval'] , reference_urls=['https://github.com/openai/human-eval'] , license=_LICENSE , )
    def _compute( self , predictions , references , k=[1, 10, 100] , num_workers=4 , timeout=3.0 ):
        '''simple docstring'''
        if os.getenv('HF_ALLOW_CODE_EVAL' , 0 ) != "1":
            raise ValueError(_WARNING )
        if os.name == "nt":
            raise NotImplementedError('This metric is currently not supported on Windows.' )
        with ThreadPoolExecutor(max_workers=num_workers ) as executor:
            futures = []
            completion_id = Counter()
            n_samples = 0
            results = defaultdict(list )
            for task_id, (candidates, test_case) in enumerate(zip(predictions , references ) ):
                for candidate in candidates:
                    test_program = candidate + '\n' + test_case
                    args = (test_program, timeout, task_id, completion_id[task_id])
                    future = executor.submit(check_correctness , *args )
                    futures.append(future )
                    completion_id[task_id] += 1
                    n_samples += 1
            for future in as_completed(futures ):
                result = future.result()
                results[result["task_id"]].append((result['completion_id'], result) )
        total, correct = [], []
        for result in results.values():
            result.sort()
            passed = [r[1]['passed'] for r in result]
            total.append(len(passed ) )
            correct.append(sum(passed ) )
        total = np.array(total )
        correct = np.array(correct )
        ks = k
        pass_at_k = {F'''pass@{k}''': estimate_pass_at_k(total , correct , k ).mean() for k in ks if (total >= k).all()}
        return pass_at_k, results
def estimate_pass_at_k( num_samples , num_correct , k ):
    """Estimates pass@k of each problem and returns them in an array."""
    def estimator(n: int , c: int , k: int ) -> float:
        if n - c < k:
            return 1.0
        return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
    if isinstance(num_samples , int ):
        num_samples_it = itertools.repeat(num_samples , len(num_correct ) )
    else:
        assert len(num_samples ) == len(num_correct )
        num_samples_it = iter(num_samples )
    return np.array([estimator(int(n ) , int(c ) , k ) for n, c in zip(num_samples_it , num_correct )] )
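# estimate_pass_at_k implements the unbiased estimator from "Evaluating Large
# Language Models Trained on Code": pass@k = 1 - C(n - c, k) / C(n, k), with n
# samples of which c passed; the running product above computes it without
# forming large binomial coefficients.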
| 246 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/vit-mae-base": "https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json",
    # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae
}
class ViTMAEConfig(PretrainedConfig ):
    '''simple docstring'''
    model_type = '''vit_mae'''
    def __init__( self , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.02 , layer_norm_eps=1e-12 , image_size=224 , patch_size=16 , num_channels=3 , qkv_bias=True , decoder_num_attention_heads=16 , decoder_hidden_size=512 , decoder_num_hidden_layers=8 , decoder_intermediate_size=2048 , mask_ratio=0.75 , norm_pix_loss=False , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.decoder_num_attention_heads = decoder_num_attention_heads
        self.decoder_hidden_size = decoder_hidden_size
        self.decoder_num_hidden_layers = decoder_num_hidden_layers
        self.decoder_intermediate_size = decoder_intermediate_size
        self.mask_ratio = mask_ratio
        self.norm_pix_loss = norm_pix_loss
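# Usage sketch (class name as restored above):
#   config = ViTMAEConfig()                        # paper defaults, mask_ratio=0.75
#   config = ViTMAEConfig(mask_ratio=0.6, norm_pix_loss=True)
# The decoder_* fields size the lightweight decoder used only during MAE pre-training.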
| 716 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase ):
    '''simple docstring'''
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
snake_case__ : Optional[int] = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
os.makedirs(__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
snake_case__ : str = os.path.join(__SCREAMING_SNAKE_CASE , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
# BART tok
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
        self.special_tokens_map = {"""unk_token""": """<unk>"""}
        bart_tokenizer_path = os.path.join(self.tmpdirname , """bart_tokenizer""" )
        os.makedirs(bart_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES["""vocab_file"""] )
        self.merges_file = os.path.join(bart_tokenizer_path , BART_VOCAB_FILES_NAMES["""merges_file"""] )
        with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + """\n""" )
        with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
            fp.write("""\n""".join(merges ) )
    def get_dpr_tokenizer( self ) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , """dpr_tokenizer""" ) )
    def get_bart_tokenizer( self ) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , """bart_tokenizer""" ) )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
@require_tokenizers
    def test_save_load_pretrained_with_saved_config( self ):
        save_dir = os.path.join(self.tmpdirname , """rag_tokenizer""" )
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() )
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer() , generator=self.get_bart_tokenizer() )
        rag_config.save_pretrained(save_dir )
        rag_tokenizer.save_pretrained(save_dir )
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir , config=rag_config )
        self.assertIsInstance(new_rag_tokenizer.question_encoder , DPRQuestionEncoderTokenizerFast )
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab() , rag_tokenizer.question_encoder.get_vocab() )
        self.assertIsInstance(new_rag_tokenizer.generator , BartTokenizerFast )
        self.assertEqual(new_rag_tokenizer.generator.get_vocab() , rag_tokenizer.generator.get_vocab() )
@slow
    def test_pretrained_token_nq_tokenizer( self ):
        tokenizer = RagTokenizer.from_pretrained("""facebook/rag-token-nq""" )
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings )
        self.assertIsNotNone(input_dict )
@slow
    def test_pretrained_sequence_nq_tokenizer( self ):
        tokenizer = RagTokenizer.from_pretrained("""facebook/rag-sequence-nq""" )
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings )
        self.assertIsNotNone(input_dict )
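# Note (behavioral summary, not from this file): RagTokenizer bundles a DPR
# question-encoder tokenizer with a BART generator tokenizer; calling the
# wrapper, as the tests above do, tokenizes with the question-encoder side.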
| 419 | 0 |
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass( frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt


def make_highpass( frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b0] )
    return filt


def make_bandpass( frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt


def make_allpass( frequency: int , samplerate: int , q_factor: float = 1 / sqrt(2 ) ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2 )
    filt.set_coefficients([b2, b1, b0] , [b0, b1, b2] )
    return filt


def make_peak( frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt


def make_lowshelf( frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a ) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt


def make_highshelf( frequency: int , samplerate: int , gain_db: float , q_factor: float = 1 / sqrt(2 ) , ) -> IIRFilter:
    """simple docstring"""
    w0 = tau * frequency / samplerate
    _sin = sin(w0 )
    _cos = cos(w0 )
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a ) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2 )
    filt.set_coefficients([a0, a1, a2] , [b0, b1, b2] )
    return filt
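# These biquads follow the standard "Audio EQ Cookbook" (RBJ) designs; each
# helper returns a second-order IIRFilter with denominator coefficients
# [a0, a1, a2] and numerator coefficients [b0, b1, b2].
# Example: make_lowpass(1000, 48000) is a 1 kHz low-pass for 48 kHz audio.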
| 488 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput ):
    '''simple docstring'''
    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


class RobertaSeriesConfig(XLMRobertaConfig ):
    '''simple docstring'''
    def __init__( self , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , project_dim=512 , pooler_fn="cls" , learn_encoder=False , use_attention_mask=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask


class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel ):
    '''simple docstring'''
    _keys_to_ignore_on_load_unexpected = [R"pooler", R"logit_scale"]
    _keys_to_ignore_on_load_missing = [R"position_ids", R"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig
    def __init__( self , config ):
        super().__init__(config )
        self.roberta = XLMRobertaModel(config )
        self.transformation = nn.Linear(config.hidden_size , config.project_dim )
        self.has_pre_transformation = getattr(config , "has_pre_transformation" , False )
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size , config.project_dim )
            self.pre_LN = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
        self.post_init()
    def forward( self , input_ids: Optional[torch.Tensor] = None , attention_mask: Optional[torch.Tensor] = None , token_type_ids: Optional[torch.Tensor] = None , position_ids: Optional[torch.Tensor] = None , head_mask: Optional[torch.Tensor] = None , inputs_embeds: Optional[torch.Tensor] = None , encoder_hidden_states: Optional[torch.Tensor] = None , encoder_attention_mask: Optional[torch.Tensor] = None , output_attentions: Optional[bool] = None , return_dict: Optional[bool] = None , output_hidden_states: Optional[bool] = None , ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids , attention_mask=attention_mask , token_type_ids=token_type_ids , position_ids=position_ids , head_mask=head_mask , inputs_embeds=inputs_embeds , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , output_attentions=output_attentions , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=return_dict , )
        if self.has_pre_transformation:
            sequence_output2 = outputs["hidden_states"][-2]
            sequence_output2 = self.pre_LN(sequence_output2 )
            projection_state2 = self.transformation_pre(sequence_output2 )
            return TransformationModelOutput(
                projection_state=projection_state2 , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
        else:
            projection_state = self.transformation(outputs.last_hidden_state )
            return TransformationModelOutput(
                projection_state=projection_state , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
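# Usage sketch (names as restored above; weight loading omitted):
#   config = RobertaSeriesConfig(project_dim=768)
#   model = RobertaSeriesModelWithTransformation(config)
#   out = model(input_ids=input_ids, attention_mask=attention_mask)
#   text_embeddings = out.projection_state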
| 488 | 1 |
"""simple docstring"""
from __future__ import annotations
graph = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class Graph:
    """simple docstring"""
    def __init__( self , graph: dict[str, list[str]] , source_vertex: str ) -> None:
        '''simple docstring'''
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex
    def breath_first_search( self ) -> None:
        '''simple docstring'''
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0 )
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex )
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex )
    def shortest_path( self , target_vertex: str ) -> str:
        '''simple docstring'''
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex )
        if target_vertex_parent is None:
            msg = (
                f"""No path from vertex: {self.source_vertex} to vertex: {target_vertex}"""
            )
            raise ValueError(msg )
        return self.shortest_path(target_vertex_parent ) + f"""->{target_vertex}"""
if __name__ == "__main__":
A = Graph(graph, """G""")
g.breath_first_search()
print(g.shortest_path("""D"""))
print(g.shortest_path("""G"""))
print(g.shortest_path("""Foo"""))
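# Note: breath_first_search() must be called before shortest_path(); the final
# query above raises ValueError because "Foo" is not reachable from "G".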
| 701 |
"""simple docstring"""
from math import log2
def get_index_of_rightmost_set_bit( number: int ) -> int:
    """simple docstring"""
    if number < 0:
        raise ValueError('''Input value must be a positive integer''' )
    elif not isinstance(number , int ):
        raise TypeError('''Input value must be a \'int\' type''' )
    return 0 if (number == 0) else int(log2(number & -number ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
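# Worked example: 12 is 0b1100, so 12 & -12 == 0b100 and log2(4) == 2, the
# index of the rightmost set bit.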
| 147 | 0 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, T5ForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
    LegacySeq2SeqDataset,
    Seq2SeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
logger = logging.getLogger(__name__)
class SummarizationModule(BaseTransformer):
    mode = "summarization"
    loss_names = ["loss"]
    metric_names = ROUGE_KEYS
    default_val_metric = "rouge2"

    def __init__( self , hparams , **kwargs ):
        if hparams.sortish_sampler and hparams.gpus > 1:
            hparams.replace_sampler_ddp = False
        elif hparams.max_tokens_per_batch is not None:
            if hparams.gpus > 1:
                raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training" )
            if hparams.sortish_sampler:
                raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously" )
        super().__init__(hparams , num_labels=None , mode=self.mode , **kwargs )
        use_task_specific_params(self.model , "summarization" )
        save_git_info(self.hparams.output_dir )
        self.metrics_save_path = Path(self.output_dir ) / "metrics.json"
        self.hparams_save_path = Path(self.output_dir ) / "hparams.pkl"
        pickle_save(self.hparams , self.hparams_save_path )
        self.step_count = 0
        self.metrics = defaultdict(list )
        self.model_type = self.config.model_type
        self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
        self.dataset_kwargs: dict = {
            "data_dir": self.hparams.data_dir,
            "max_source_length": self.hparams.max_source_length,
            "prefix": self.model.config.prefix or "",
        }
        n_observations_per_split = {
            "train": self.hparams.n_train,
            "val": self.hparams.n_val,
            "test": self.hparams.n_test,
        }
        self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
        self.target_lens = {
            "train": self.hparams.max_target_length,
            "val": self.hparams.val_max_target_length,
            "test": self.hparams.test_max_target_length,
        }
        assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}"
        assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}"
        if self.hparams.freeze_embeds:
            freeze_embeds(self.model )
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder() )
            assert_all_frozen(self.model.get_encoder() )
        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , MBartTokenizer ):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id
        self.dataset_class = (
            Seq2SeqDataset if hasattr(self.tokenizer , "prepare_seq2seq_batch" ) else LegacySeq2SeqDataset
        )
        self.already_saved_batch = False
        self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
        if self.hparams.eval_max_gen_length is not None:
            self.eval_max_length = self.hparams.eval_max_gen_length
        else:
            self.eval_max_length = self.model.config.max_length
        self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
    def save_readable_batch( self , batch ):
        readable_batch = {
            k: self.tokenizer.batch_decode(v.tolist() ) if "mask" not in k else v.shape for k, v in batch.items()
        }
        save_json(readable_batch , Path(self.output_dir ) / "text_batch.json" )
        save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / "tok_batch.json" )
        self.already_saved_batch = True
        return readable_batch

    def forward( self , input_ids , **kwargs ):
        return self.model(input_ids , **kwargs )

    def ids_to_clean_text( self , generated_ids ):
        gen_text = self.tokenizer.batch_decode(
            generated_ids , skip_special_tokens=True , clean_up_tokenization_spaces=True )
        return lmap(str.strip , gen_text )
    def _step( self , batch ):
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask = batch["input_ids"], batch["attention_mask"]
        tgt_ids = batch["labels"]
        if isinstance(self.model , T5ForConditionalGeneration ):
            decoder_input_ids = self.model._shift_right(tgt_ids )
        else:
            decoder_input_ids = shift_tokens_right(tgt_ids , pad_token_id )
        if not self.already_saved_batch:  # This would be slightly better if it only happened on rank zero
            batch["decoder_input_ids"] = decoder_input_ids
            self.save_readable_batch(batch )
        outputs = self(src_ids , attention_mask=src_mask , decoder_input_ids=decoder_input_ids , use_cache=False )
        lm_logits = outputs["logits"]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = nn.CrossEntropyLoss(ignore_index=pad_token_id )
            assert lm_logits.shape[-1] == self.vocab_size
            loss = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
        else:
            lprobs = nn.functional.log_softmax(lm_logits , dim=-1 )
            loss, nll_loss = label_smoothed_nll_loss(
                lprobs , tgt_ids , self.hparams.label_smoothing , ignore_index=pad_token_id )
        return (loss,)
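    # When label_smoothing > 0, label_smoothed_nll_loss (fairseq-style) moves
    # epsilon of the target probability mass uniformly onto the rest of the
    # vocabulary instead of training against a hard one-hot target.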
    @property
    def pad( self ) -> int:
        return self.tokenizer.pad_token_id

    def training_step( self , batch , batch_idx ):
        loss_tensors = self._step(batch )
        logs = dict(zip(self.loss_names , loss_tensors ) )
        # tokens per batch
        logs["tpb"] = batch["input_ids"].ne(self.pad ).sum() + batch["labels"].ne(self.pad ).sum()
        logs["bs"] = batch["input_ids"].shape[0]
        logs["src_pad_tok"] = batch["input_ids"].eq(self.pad ).sum()
        logs["src_pad_frac"] = batch["input_ids"].eq(self.pad ).float().mean()
        # TODO(SS): make a wandb summary metric for this
        return {"loss": loss_tensors[0], "log": logs}
    def validation_step( self , batch , batch_idx ):
        return self._generative_step(batch )

    def validation_epoch_end( self , outputs , prefix="val" ):
        self.step_count += 1
        losses = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
        loss = losses["loss"]
        generative_metrics = {
            k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["gen_time", "gen_len"]
        }
        metric_val = (
            generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
        )
        metric_tensor: torch.FloatTensor = torch.tensor(metric_val ).type_as(loss )
        generative_metrics.update({k: v.item() for k, v in losses.items()} )
        losses.update(generative_metrics )
        all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()}
        all_metrics["step_count"] = self.step_count
        self.metrics[prefix].append(all_metrics )  # callback writes this to self.metrics_save_path
        preds = flatten_list([x["preds"] for x in outputs] )
        return {
            "log": all_metrics,
            "preds": preds,
            f"{prefix}_loss": loss,
            f"{prefix}_{self.val_metric}": metric_tensor,
        }
    def calc_generative_metrics( self , preds , target ):
        return calculate_rouge(preds , target )

    def _generative_step( self , batch ):
        t0 = time.time()
        # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
        generated_ids = self.model.generate(
            batch["input_ids"] , attention_mask=batch["attention_mask"] , use_cache=True , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
        gen_time = (time.time() - t0) / batch["input_ids"].shape[0]
        preds = self.ids_to_clean_text(generated_ids )
        target = self.ids_to_clean_text(batch["labels"] )
        loss_tensors = self._step(batch )
        base_metrics = dict(zip(self.loss_names , loss_tensors ) )
        rouge = self.calc_generative_metrics(preds , target )
        summ_len = np.mean(lmap(len , preds ) )
        base_metrics.update(gen_time=gen_time , gen_len=summ_len , preds=preds , target=target , **rouge )
        return base_metrics

    def test_step( self , batch , batch_idx ):
        return self._generative_step(batch )

    def test_epoch_end( self , outputs ):
        return self.validation_epoch_end(outputs , prefix="test" )
    def get_dataset( self , type_path ) -> Seq2SeqDataset:
        n_obs = self.n_obs[type_path]
        max_target_length = self.target_lens[type_path]
        dataset = self.dataset_class(
            self.tokenizer , type_path=type_path , n_obs=n_obs , max_target_length=max_target_length , **self.dataset_kwargs , )
        return dataset

    def get_dataloader( self , type_path , batch_size , shuffle=False ) -> DataLoader:
        dataset = self.get_dataset(type_path )
        if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
            sampler = dataset.make_sortish_sampler(batch_size , distributed=self.hparams.gpus > 1 )
            return DataLoader(
                dataset , batch_size=batch_size , collate_fn=dataset.collate_fn , shuffle=False , num_workers=self.num_workers , sampler=sampler , )
        elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
            batch_sampler = dataset.make_dynamic_sampler(
                self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
            return DataLoader(
                dataset , batch_sampler=batch_sampler , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
        else:
            return DataLoader(
                dataset , batch_size=batch_size , collate_fn=dataset.collate_fn , shuffle=shuffle , num_workers=self.num_workers , sampler=None , )

    def train_dataloader( self ) -> DataLoader:
        dataloader = self.get_dataloader("train" , batch_size=self.hparams.train_batch_size , shuffle=True )
        return dataloader

    def val_dataloader( self ) -> DataLoader:
        return self.get_dataloader("val" , batch_size=self.hparams.eval_batch_size )

    def test_dataloader( self ) -> DataLoader:
        return self.get_dataloader("test" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def UpperCamelCase__ ( _snake_case ,_snake_case ):
BaseTransformer.add_model_specific_args(_snake_case ,_snake_case )
add_generic_args(_snake_case ,_snake_case )
parser.add_argument(
"--max_source_length" ,default=10_24 ,type=_snake_case ,help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) ,)
parser.add_argument(
"--max_target_length" ,default=56 ,type=_snake_case ,help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) ,)
parser.add_argument(
"--val_max_target_length" ,default=1_42 ,type=_snake_case ,help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) ,)
parser.add_argument(
"--test_max_target_length" ,default=1_42 ,type=_snake_case ,help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) ,)
parser.add_argument("--freeze_encoder" ,action="store_true" )
parser.add_argument("--freeze_embeds" ,action="store_true" )
parser.add_argument("--sortish_sampler" ,action="store_true" ,default=_snake_case )
parser.add_argument("--overwrite_output_dir" ,action="store_true" ,default=_snake_case )
parser.add_argument("--max_tokens_per_batch" ,type=_snake_case ,default=_snake_case )
parser.add_argument("--logger_name" ,type=_snake_case ,choices=["default", "wandb", "wandb_shared"] ,default="default" )
parser.add_argument("--n_train" ,type=_snake_case ,default=-1 ,required=_snake_case ,help="# examples. -1 means use all." )
parser.add_argument("--n_val" ,type=_snake_case ,default=5_00 ,required=_snake_case ,help="# examples. -1 means use all." )
parser.add_argument("--n_test" ,type=_snake_case ,default=-1 ,required=_snake_case ,help="# examples. -1 means use all." )
parser.add_argument(
"--task" ,type=_snake_case ,default="summarization" ,required=_snake_case ,help="# examples. -1 means use all." )
parser.add_argument("--label_smoothing" ,type=_snake_case ,default=0.0 ,required=_snake_case )
parser.add_argument("--src_lang" ,type=_snake_case ,default="" ,required=_snake_case )
parser.add_argument("--tgt_lang" ,type=_snake_case ,default="" ,required=_snake_case )
parser.add_argument("--eval_beams" ,type=_snake_case ,default=_snake_case ,required=_snake_case )
parser.add_argument(
"--val_metric" ,type=_snake_case ,default=_snake_case ,required=_snake_case ,choices=["bleu", "rouge2", "loss", None] )
parser.add_argument("--eval_max_gen_length" ,type=_snake_case ,default=_snake_case ,help="never generate more than n tokens" )
parser.add_argument("--save_top_k" ,type=_snake_case ,default=1 ,required=_snake_case ,help="How many checkpoints to save" )
parser.add_argument(
"--early_stopping_patience" ,type=_snake_case ,default=-1 ,required=_snake_case ,help=(
"-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
" val_check_interval will effect it."
) ,)
return parser
class _snake_case (__SCREAMING_SNAKE_CASE):
__A : Any ="translation"
__A : Optional[int] =["loss"]
__A : str =["bleu"]
__A : int ="bleu"
def __init__( self ,_snake_case ,**_snake_case ):
super().__init__(_snake_case ,**_snake_case )
UpperCAmelCase_ : Optional[int] = hparams.src_lang
UpperCAmelCase_ : Optional[Any] = hparams.tgt_lang
def UpperCamelCase__ ( self ,_snake_case ,_snake_case ):
return calculate_bleu(_snake_case ,_snake_case )
def a__ ( _SCREAMING_SNAKE_CASE : Tuple , _SCREAMING_SNAKE_CASE : int=None ) -> SummarizationModule:
"""simple docstring"""
Path(args.output_dir ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
check_output_dir(_SCREAMING_SNAKE_CASE , expected_items=3 )
if model is None:
if "summarization" in args.task:
UpperCAmelCase_ : SummarizationModule = SummarizationModule(_SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ : SummarizationModule = TranslationModule(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("/tmp" )
or str(args.output_dir ).startswith("/var" )
):
UpperCAmelCase_ : Tuple = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
UpperCAmelCase_ : str = os.environ.get("WANDB_PROJECT" , _SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = WandbLogger(name=model.output_dir.name , project=_SCREAMING_SNAKE_CASE )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
UpperCAmelCase_ : List[Any] = WandbLogger(name=model.output_dir.name , project=F'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
UpperCAmelCase_ : Optional[Any] = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
UpperCAmelCase_ : Union[str, Any] = False
UpperCAmelCase_ : List[Any] = args.val_metric == "loss"
UpperCAmelCase_ : pl.Trainer = generic_train(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , _SCREAMING_SNAKE_CASE ) , early_stopping_callback=_SCREAMING_SNAKE_CASE , logger=_SCREAMING_SNAKE_CASE , )
pickle_save(model.hparams , model.output_dir / "hparams.pkl" )
if not args.do_predict:
return model
UpperCAmelCase_ : Tuple = ""
UpperCAmelCase_ : str = sorted(glob.glob(os.path.join(args.output_dir , "*.ckpt" ) , recursive=_SCREAMING_SNAKE_CASE ) )
if checkpoints:
UpperCAmelCase_ : Optional[Any] = checkpoints[-1]
UpperCAmelCase_ : Tuple = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
_lowerCamelCase = pl.Trainer.add_argparse_args(parser)
_lowerCamelCase = SummarizationModule.add_model_specific_args(parser, os.getcwd())
_lowerCamelCase = parser.parse_args()
main(args)
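# Minimal sketch (added, hypothetical values) of the epoch-end reduction used
# in validation_epoch_end above: per-step scalar loss tensors are stacked and
# averaged into a single epoch-level metric.
import torch as _demo_torch

_demo_outputs = [{"loss": _demo_torch.tensor(0.5)}, {"loss": _demo_torch.tensor(1.5)}]
_demo_avg = _demo_torch.stack([x["loss"] for x in _demo_outputs]).mean()
assert float(_demo_avg) == 1.0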
| 71 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class _snake_case (unittest.TestCase):
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = tempfile.mkdtemp()
# fmt: off
UpperCAmelCase_ : List[str] = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
UpperCAmelCase_ : List[str] = dict(zip(_snake_case ,range(len(_snake_case ) ) ) )
UpperCAmelCase_ : List[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
UpperCAmelCase_ : Dict = {"unk_token": "<unk>"}
UpperCAmelCase_ : Optional[int] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["vocab_file"] )
UpperCAmelCase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as fp:
fp.write(json.dumps(_snake_case ) + "\n" )
with open(self.merges_file ,"w" ,encoding="utf-8" ) as fp:
fp.write("\n".join(_snake_case ) )
UpperCAmelCase_ : Optional[Any] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48145466, 0.4578275, 0.40821073],
"image_std": [0.26862954, 0.26130258, 0.27577711],
}
UpperCAmelCase_ : str = os.path.join(self.tmpdirname ,_snake_case )
with open(self.image_processor_file ,"w" ,encoding="utf-8" ) as fp:
json.dump(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ,**_snake_case ):
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**_snake_case )
def UpperCamelCase__ ( self ,**_snake_case ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**_snake_case )
def UpperCamelCase__ ( self ,**_snake_case ):
return CLIPImageProcessor.from_pretrained(self.tmpdirname ,**_snake_case )
def UpperCamelCase__ ( self ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )]
UpperCAmelCase_ : Union[str, Any] = [Image.fromarray(np.moveaxis(_snake_case ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Tuple = self.get_tokenizer()
UpperCAmelCase_ : str = self.get_rust_tokenizer()
UpperCAmelCase_ : List[str] = self.get_image_processor()
UpperCAmelCase_ : Tuple = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
processor_slow.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : int = CLIPProcessor.from_pretrained(self.tmpdirname ,use_fast=_snake_case )
UpperCAmelCase_ : str = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
processor_fast.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : str = CLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,_snake_case )
self.assertIsInstance(processor_fast.tokenizer ,_snake_case )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,_snake_case )
self.assertIsInstance(processor_fast.image_processor ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = CLIPProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer(bos_token="(BOS)" ,eos_token="(EOS)" )
UpperCAmelCase_ : Tuple = self.get_image_processor(do_normalize=_snake_case ,padding_value=1.0 )
UpperCAmelCase_ : int = CLIPProcessor.from_pretrained(
self.tmpdirname ,bos_token="(BOS)" ,eos_token="(EOS)" ,do_normalize=_snake_case ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_snake_case )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : List[str] = self.get_image_processor()
UpperCAmelCase_ : Dict = self.get_tokenizer()
UpperCAmelCase_ : Dict = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : Any = self.prepare_image_inputs()
UpperCAmelCase_ : Optional[int] = image_processor(_snake_case ,return_tensors="np" )
UpperCAmelCase_ : Any = processor(images=_snake_case ,return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Optional[Any] = self.get_image_processor()
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : Optional[int] = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : Tuple = "lower newer"
UpperCAmelCase_ : Any = processor(text=_snake_case )
UpperCAmelCase_ : List[Any] = tokenizer(_snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = self.get_image_processor()
UpperCAmelCase_ : Union[str, Any] = self.get_tokenizer()
UpperCAmelCase_ : Tuple = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : Any = "lower newer"
UpperCAmelCase_ : List[str] = self.prepare_image_inputs()
UpperCAmelCase_ : str = processor(text=_snake_case ,images=_snake_case )
self.assertListEqual(list(inputs.keys() ) ,["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(_snake_case ):
processor()
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : str = self.get_image_processor()
UpperCAmelCase_ : Dict = self.get_tokenizer()
UpperCAmelCase_ : Optional[int] = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : List[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_ : int = processor.batch_decode(_snake_case )
UpperCAmelCase_ : int = tokenizer.batch_decode(_snake_case )
self.assertListEqual(_snake_case ,_snake_case )
def UpperCamelCase__ ( self ):
UpperCAmelCase_ : Dict = self.get_image_processor()
UpperCAmelCase_ : int = self.get_tokenizer()
UpperCAmelCase_ : Tuple = CLIPProcessor(tokenizer=_snake_case ,image_processor=_snake_case )
UpperCAmelCase_ : Optional[int] = "lower newer"
UpperCAmelCase_ : Any = self.prepare_image_inputs()
UpperCAmelCase_ : Dict = processor(text=_snake_case ,images=_snake_case )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
| 71 | 1 |
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
_lowerCAmelCase = [
# tf -> hf
("""/""", """."""),
("""layer_""", """layers."""),
("""kernel""", """weight"""),
("""beta""", """bias"""),
("""gamma""", """weight"""),
("""pegasus""", """model"""),
]
_lowerCAmelCase = [
(""".output.dense""", """.fc2"""),
("""intermediate.LayerNorm""", """final_layer_norm"""),
("""intermediate.dense""", """fc1"""),
]
_lowerCAmelCase = (
INIT_COMMON
+ [
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.out_proj"""),
("""attention.self""", """self_attn"""),
("""attention.encdec.LayerNorm""", """encoder_attn_layer_norm"""),
("""attention.encdec_output.dense""", """encoder_attn.out_proj"""),
("""attention.encdec""", """encoder_attn"""),
("""key""", """k_proj"""),
("""value""", """v_proj"""),
("""query""", """q_proj"""),
("""decoder.LayerNorm""", """decoder.layernorm_embedding"""),
]
+ END_COMMON
)
_lowerCAmelCase = (
INIT_COMMON
+ [
("""embeddings.word_embeddings""", """shared.weight"""),
("""embeddings.position_embeddings""", """embed_positions.weight"""),
("""attention.self.LayerNorm""", """self_attn_layer_norm"""),
("""attention.output.dense""", """self_attn.output"""),
("""attention.self""", """self_attn.self"""),
("""encoder.LayerNorm""", """encoder.layernorm_embedding"""),
]
+ END_COMMON
)
_lowerCAmelCase = [
"""encdec/key/bias""",
"""encdec/query/bias""",
"""encdec/value/bias""",
"""self/key/bias""",
"""self/query/bias""",
"""self/value/bias""",
"""encdec_output/dense/bias""",
"""attention/output/dense/bias""",
]
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
for tf_name, hf_name in patterns:
_lowerCAmelCase : Optional[Any] = k.replace(lowercase__ , lowercase__ )
return k
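# Minimal sketch (added) of the substring-based key renaming performed just
# above: each (tf_name, hf_name) pattern is applied in order as a plain
# str.replace. The checkpoint key below is a hypothetical example; the three
# patterns are taken from INIT_COMMON.
_demo_patterns = [("/", "."), ("layer_", "layers."), ("kernel", "weight")]
_demo_key = "encoder/layer_0/kernel"
for _demo_tf, _demo_hf in _demo_patterns:
    _demo_key = _demo_key.replace(_demo_tf, _demo_hf)
assert _demo_key == "encoder.layers.0.weight"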
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Any = BigBirdPegasusConfig(**lowercase__ )
_lowerCAmelCase : int = BigBirdPegasusForConditionalGeneration(lowercase__ )
_lowerCAmelCase : Dict = torch_model.state_dict()
_lowerCAmelCase : Union[str, Any] = {}
# separating decoder weights
_lowerCAmelCase : str = {k: tf_weights[k] for k in tf_weights if k.startswith('pegasus/decoder' )}
_lowerCAmelCase : Union[str, Any] = {k: tf_weights[k] for k in tf_weights if not k.startswith('pegasus/decoder' )}
for k, v in tqdm(decoder_weights.items() , 'tf -> hf conversion' ):
_lowerCAmelCase : int = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(lowercase__ ):
continue
_lowerCAmelCase : Optional[int] = DECODER_PATTERNS
_lowerCAmelCase : Union[str, Any] = rename_state_dict_key(lowercase__ , lowercase__ )
if new_k not in state_dict:
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(i in k for i in ['dense', 'query', 'key', 'value'] ):
_lowerCAmelCase : str = v.T
_lowerCAmelCase : Union[str, Any] = torch.from_numpy(lowercase__ )
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
for k, v in tqdm(remaining_weights.items() , 'tf -> hf conversion' ):
_lowerCAmelCase : str = [k.endswith(ending ) for ending in KEYS_TO_IGNORE]
if any(lowercase__ ):
continue
_lowerCAmelCase : int = REMAINING_PATTERNS
_lowerCAmelCase : List[Any] = rename_state_dict_key(lowercase__ , lowercase__ )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(i in k for i in ['dense', 'query', 'key', 'value'] ):
_lowerCAmelCase : Optional[Any] = v.T
_lowerCAmelCase : Dict = torch.from_numpy(lowercase__ )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
_lowerCAmelCase : Tuple = mapping['model.embed_positions.weight']
_lowerCAmelCase : str = mapping.pop('model.embed_positions.weight' )
_lowerCAmelCase, _lowerCAmelCase : Tuple = torch_model.load_state_dict(lowercase__ , strict=lowercase__ )
_lowerCAmelCase : Any = [
k
for k in missing
if k
not in [
'final_logits_bias',
'model.encoder.embed_tokens.weight',
'model.decoder.embed_tokens.weight',
'lm_head.weight',
]
]
assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], f"""no matches found for the following tf keys {extra}"""
return torch_model
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : Optional[int] = tf.train.list_variables(lowercase__ )
_lowerCAmelCase : Optional[Any] = {}
_lowerCAmelCase : str = ['global_step']
for name, shape in tqdm(lowercase__ , desc='converting tf checkpoint to dict' ):
_lowerCAmelCase : Union[str, Any] = any(pat in name for pat in ignore_name )
if skip_key:
continue
_lowerCAmelCase : Optional[Any] = tf.train.load_variable(lowercase__ , lowercase__ )
_lowerCAmelCase : List[Any] = array
return tf_weights
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[Any] = get_tf_weights_as_numpy(lowercase__ )
_lowerCAmelCase : Optional[int] = convert_bigbird_pegasus(lowercase__ , lowercase__ )
torch_model.save_pretrained(lowercase__ )
if __name__ == "__main__":
_lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--tf_ckpt_path""", type=str, help="""passed to tf.train.list_variables""")
parser.add_argument("""--save_dir""", default=None, type=str, help="""Path to the output PyTorch model.""")
_lowerCAmelCase = parser.parse_args()
_lowerCAmelCase = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 701 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowerCAmelCase = logging.get_logger(__name__)
_lowerCAmelCase = {
"""vocab_file""": """vocab.json""",
"""tokenizer_config_file""": """tokenizer_config.json""",
"""merges_file""": """merges.txt""",
}
_lowerCAmelCase = {
"""vocab_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"""
),
},
"""tokenizer_config_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"""
),
},
"""merges_file""": {
"""facebook/s2t-wav2vec2-large-en-de""": (
"""https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"""
),
},
}
_lowerCAmelCase = """</w>"""
_lowerCAmelCase = """@@ """
def lowerCamelCase__ ( _lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase : List[str] = set()
_lowerCAmelCase : Dict = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_lowerCAmelCase : Any = char
return pairs
# Speech2Text2 has no max input length
_lowerCAmelCase = {"""facebook/s2t-wav2vec2-large-en-de""": 1_0_2_4}
class __UpperCamelCase ( a__ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = ["input_ids", "attention_mask"]
def __init__( self ,_A ,_A="<s>" ,_A="<pad>" ,_A="</s>" ,_A="<unk>" ,_A=False ,_A=None ,**_A ,):
'''simple docstring'''
super().__init__(
unk_token=_A ,bos_token=_A ,eos_token=_A ,pad_token=_A ,do_lower_case=_A ,**_A ,)
_lowerCAmelCase : List[Any] = do_lower_case
with open(_A ,encoding='utf-8' ) as vocab_handle:
_lowerCAmelCase : Optional[int] = json.load(_A )
_lowerCAmelCase : Tuple = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(F"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
_lowerCAmelCase : Optional[Any] = None
_lowerCAmelCase : Tuple = None
else:
with open(_A ,encoding='utf-8' ) as merges_handle:
_lowerCAmelCase : Optional[Any] = merges_handle.read().split('\n' )[:-1]
_lowerCAmelCase : List[str] = [tuple(merge.split()[:2] ) for merge in merges]
_lowerCAmelCase : List[Any] = dict(zip(_A ,range(len(_A ) ) ) )
_lowerCAmelCase : Union[str, Any] = {}
@property
def __lowerCamelCase ( self ):
'''simple docstring'''
return len(self.decoder )
def __lowerCamelCase ( self ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : str = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_lowerCAmelCase : str = get_pairs(_A )
if not pairs:
return token
while True:
_lowerCAmelCase : List[str] = min(_A ,key=lambda _A : self.bpe_ranks.get(_A ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
_lowerCAmelCase, _lowerCAmelCase : Optional[int] = bigram
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : Dict = 0
while i < len(_A ):
try:
_lowerCAmelCase : Dict = word.index(_A ,_A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_lowerCAmelCase : Optional[Any] = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_lowerCAmelCase : Optional[Any] = tuple(_A )
_lowerCAmelCase : List[str] = new_word
if len(_A ) == 1:
break
else:
_lowerCAmelCase : List[str] = get_pairs(_A )
_lowerCAmelCase : Any = ' '.join(_A )
if word == "\n " + BPE_TOKEN_MERGES:
_lowerCAmelCase : str = '\n' + BPE_TOKEN_MERGES
if word.endswith(_A ):
_lowerCAmelCase : Dict = word.replace(_A ,'' )
_lowerCAmelCase : str = word.replace(' ' ,_A )
_lowerCAmelCase : str = word
return word
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
if self.bpe_ranks is None:
raise ValueError(
'This tokenizer was instantiated without a `merges.txt` file, so'
' that it can only be used for decoding, not for encoding. '
'Make sure to provide `merges.txt` file at instantiation to enable '
'encoding.' )
if self.do_lower_case:
_lowerCAmelCase : Optional[Any] = text.lower()
_lowerCAmelCase : Tuple = text.split()
_lowerCAmelCase : Union[str, Any] = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(_A ).split(' ' ) ) )
return split_tokens
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
return self.encoder.get(_A ,self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : int = self.decoder.get(_A ,self.unk_token )
return result
def __lowerCamelCase ( self ,_A ):
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = ' '.join(_A )
# make sure @@ tokens are concatenated
_lowerCAmelCase : int = ''.join(string.split(_A ) )
return string
def __lowerCamelCase ( self ,_A ,_A = None ):
'''simple docstring'''
if not os.path.isdir(_A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
_lowerCAmelCase : List[Any] = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase : str = os.path.join(
_A ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_A ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_A ,ensure_ascii=_A ) + '\n' )
_lowerCAmelCase : str = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(_A ,'w' ,encoding='utf-8' ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _A : _A[1] ):
if index != token_index:
logger.warning(
F"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
_lowerCAmelCase : Dict = token_index
writer.write(' '.join(_A ) + '\n' )
index += 1
return (vocab_file, merges_file)
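# Minimal sketch (added) of the "@@ " continuation convention that
# convert_tokens_to_string above undoes: sub-word pieces carry a trailing
# "@@", and joining on spaces then splitting on "@@ " re-fuses them. The
# token sequence below is a hypothetical example.
_demo_tokens = ["hel@@", "lo", "wor@@", "ld"]
_demo_string = " ".join(_demo_tokens)  # "hel@@ lo wor@@ ld"
assert "".join(_demo_string.split("@@ ")) == "hello world"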
| 16 | 0 |
"""simple docstring"""
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class lowerCAmelCase__ ( lowercase_ , lowercase_ ):
"""simple docstring"""
__UpperCAmelCase : Any = "pixel_values"
__UpperCAmelCase : int = False
__UpperCAmelCase : int = TimmBackboneConfig
def __init__( self : Union[str, Any] , lowercase__ : Union[str, Any] , **lowercase__ : Tuple ):
requires_backends(self , "timm" )
super().__init__(lowercase__ )
__lowercase : List[Any] = config
if config.backbone is None:
raise ValueError("backbone is not set in the config. Please set it to a timm model name." )
if config.backbone not in timm.list_models():
raise ValueError(f'backbone {config.backbone} is not supported by timm.' )
if hasattr(lowercase__ , "out_features" ) and config.out_features is not None:
raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead." )
__lowercase : Union[str, Any] = getattr(lowercase__ , "use_pretrained_backbone" , None )
if pretrained is None:
raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False." )
# We just take the final layer by default. This matches the default for the transformers models.
__lowercase : Tuple = config.out_indices if getattr(lowercase__ , "out_indices" , None ) is not None else (-1,)
__lowercase : Union[str, Any] = timm.create_model(
config.backbone , pretrained=lowercase__ , features_only=config.features_only , in_chans=config.num_channels , out_indices=lowercase__ , **lowercase__ , )
# These are used to control the output of the model when called. If output_hidden_states is True, then
# return_layers is modified to include all layers.
__lowercase : Union[str, Any] = self._backbone.return_layers
__lowercase : List[str] = {layer["module"]: str(lowercase__ ) for i, layer in enumerate(self._backbone.feature_info.info )}
super()._init_backbone(lowercase__ )
@classmethod
def snake_case ( cls : Tuple , lowercase__ : Optional[Any] , *lowercase__ : Optional[Any] , **lowercase__ : str ):
requires_backends(cls , ["vision", "timm"] )
from ...models.timm_backbone import TimmBackboneConfig
__lowercase : Optional[Any] = kwargs.pop("config" , TimmBackboneConfig() )
__lowercase : Tuple = kwargs.pop("use_timm_backbone" , lowercase__ )
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones" )
__lowercase : Any = kwargs.pop("num_channels" , config.num_channels )
__lowercase : int = kwargs.pop("features_only" , config.features_only )
__lowercase : Dict = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
__lowercase : Optional[Any] = kwargs.pop("out_indices" , config.out_indices )
__lowercase : int = TimmBackboneConfig(
backbone=lowercase__ , num_channels=lowercase__ , features_only=lowercase__ , use_pretrained_backbone=lowercase__ , out_indices=lowercase__ , )
return super()._from_config(lowercase__ , **lowercase__ )
def snake_case ( self : Optional[Any] , lowercase__ : Optional[int] ):
pass
def snake_case ( self : Tuple , lowercase__ : List[Any] , lowercase__ : Any=None , lowercase__ : Dict=None , lowercase__ : Optional[int]=None , **lowercase__ : int ):
__lowercase : Any = return_dict if return_dict is not None else self.config.use_return_dict
__lowercase : Optional[int] = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__lowercase : List[str] = output_attentions if output_attentions is not None else self.config.output_attentions
if output_attentions:
raise ValueError("Cannot output attentions for timm backbones at the moment" )
if output_hidden_states:
# We modify the return layers to include all the stages of the backbone
__lowercase : Any = self._all_layers
__lowercase : Tuple = self._backbone(lowercase__ , **lowercase__ )
__lowercase : int = self._return_layers
__lowercase : Any = tuple(hidden_states[i] for i in self.out_indices )
else:
__lowercase : Optional[Any] = self._backbone(lowercase__ , **lowercase__ )
__lowercase : Tuple = None
__lowercase : Dict = tuple(lowercase__ )
__lowercase : List[Any] = tuple(lowercase__ ) if hidden_states is not None else None
if not return_dict:
__lowercase : Dict = (feature_maps,)
if output_hidden_states:
__lowercase : Any = output + (hidden_states,)
return output
return BackboneOutput(feature_maps=lowercase__ , hidden_states=lowercase__ , attentions=lowercase__ )
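# Minimal sketch (added, hypothetical stage names) of how out_indices selects
# feature maps from the backbone's per-stage hidden states, mirroring the
# tuple(hidden_states[i] for i in self.out_indices) line above.
_demo_hidden_states = ("stage0", "stage1", "stage2", "stage3")
_demo_out_indices = (-1,)  # the default used above: last stage only
assert tuple(_demo_hidden_states[i] for i in _demo_out_indices) == ("stage3",)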
| 575 |
'''simple docstring'''
import os
import unittest
from transformers import LxmertTokenizer, LxmertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( lowercase_ , unittest.TestCase ):
_UpperCAmelCase =LxmertTokenizer
_UpperCAmelCase =LxmertTokenizerFast
_UpperCAmelCase =True
_UpperCAmelCase =True
def _lowerCAmelCase ( self: Dict) ->int:
'''simple docstring'''
super().setUp()
a_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8") as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
def _lowerCAmelCase ( self: Optional[Any] , a: Dict) ->Optional[Any]:
'''simple docstring'''
a_ = "UNwant\u00E9d,running"
a_ = "unwanted, running"
return input_text, output_text
def _lowerCAmelCase ( self: List[Any]) ->Optional[int]:
'''simple docstring'''
a_ = self.tokenizer_class(self.vocab_file)
a_ = tokenizer.tokenize("UNwant\u00E9d,running")
self.assertListEqual(a , ["un", "##want", "##ed", ",", "runn", "##ing"])
self.assertListEqual(tokenizer.convert_tokens_to_ids(a) , [7, 4, 5, 10, 8, 9])
def _lowerCAmelCase ( self: List[Any]) ->Any:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
a_ = self.get_tokenizer()
a_ = self.get_rust_tokenizer()
a_ = "I was born in 92000, and this is falsé."
a_ = tokenizer.tokenize(a)
a_ = rust_tokenizer.tokenize(a)
self.assertListEqual(a , a)
a_ = tokenizer.encode(a , add_special_tokens=a)
a_ = rust_tokenizer.encode(a , add_special_tokens=a)
self.assertListEqual(a , a)
a_ = self.get_rust_tokenizer()
a_ = tokenizer.encode(a)
a_ = rust_tokenizer.encode(a)
self.assertListEqual(a , a)
| 685 | 0 |
"""simple docstring"""
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
__lowerCAmelCase : Any =logging.getLogger(__name__)
class _A :
def __init__( self ):
"""simple docstring"""
lowercase = False
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if not self.initialized:
lowercase = RagRetriever(
__lowerCAmelCase , question_encoder_tokenizer=__lowerCAmelCase , generator_tokenizer=__lowerCAmelCase , index=__lowerCAmelCase , init_retrieval=__lowerCAmelCase , )
lowercase = True
def A__ ( self ):
"""simple docstring"""
self.retriever.index.init_index()
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
lowercase , lowercase = self.retriever._main_retrieve(__lowerCAmelCase , __lowerCAmelCase )
return doc_ids, retrieved_doc_embeds
class _A ( lowerCAmelCase ):
def __init__( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None ):
"""simple docstring"""
if index is not None and index.is_initialized() and len(__lowerCAmelCase ) > 0:
raise ValueError(
"""When using Ray for distributed fine-tuning, """
"""you'll need to provide the paths instead, """
"""as the dataset and the index are loaded """
"""separately. More info in examples/rag/use_own_knowledge_dataset.py """ )
super().__init__(
__lowerCAmelCase , question_encoder_tokenizer=__lowerCAmelCase , generator_tokenizer=__lowerCAmelCase , index=__lowerCAmelCase , init_retrieval=__lowerCAmelCase , )
lowercase = retrieval_workers
if len(self.retrieval_workers ) > 0:
ray.get(
[
worker.create_rag_retriever.remote(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
for worker in self.retrieval_workers
] )
def A__ ( self ):
"""simple docstring"""
logger.info("""initializing retrieval""" )
if len(self.retrieval_workers ) > 0:
ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers] )
else:
# Non-distributed training. Load index into this same process.
self.index.init_index()
def A__ ( self , __lowerCAmelCase , __lowerCAmelCase ):
"""simple docstring"""
if len(self.retrieval_workers ) > 0:
# Select a random retrieval actor.
lowercase = self.retrieval_workers[random.randint(0 , len(self.retrieval_workers ) - 1 )]
lowercase , lowercase = ray.get(random_worker.retrieve.remote(__lowerCAmelCase , __lowerCAmelCase ) )
else:
lowercase , lowercase = self._main_retrieve(__lowerCAmelCase , __lowerCAmelCase )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(__lowerCAmelCase )
@classmethod
def A__ ( cls , __lowerCAmelCase , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
return super(__lowerCAmelCase , cls ).get_tokenizers(__lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase )
@classmethod
def A__ ( cls , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=None , **__lowerCAmelCase ):
"""simple docstring"""
lowercase = kwargs.pop("""config""" , __lowerCAmelCase ) or RagConfig.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase )
lowercase = RagTokenizer.from_pretrained(__lowerCAmelCase , config=__lowerCAmelCase )
lowercase = rag_tokenizer.question_encoder
lowercase = rag_tokenizer.generator
if indexed_dataset is not None:
lowercase = """custom"""
lowercase = CustomHFIndex(config.retrieval_vector_size , __lowerCAmelCase )
else:
lowercase = cls._build_index(__lowerCAmelCase )
return cls(
__lowerCAmelCase , question_encoder_tokenizer=__lowerCAmelCase , generator_tokenizer=__lowerCAmelCase , retrieval_workers=__lowerCAmelCase , index=__lowerCAmelCase , )
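# Minimal sketch (added, hypothetical worker handles) of the load balancing
# in retrieve() above: each call picks one Ray actor uniformly at random from
# the worker pool.
import random as _demo_random

_demo_workers = ["worker_a", "worker_b", "worker_c"]
_demo_choice = _demo_workers[_demo_random.randint(0, len(_demo_workers) - 1)]
assert _demo_choice in _demo_workers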
| 197 | """simple docstring"""
import tempfile
import torch
from diffusers import IPNDMScheduler
from .test_schedulers import SchedulerCommonTest
class _A ( lowerCAmelCase ):
snake_case__ : Union[str, Any] = (IPNDMScheduler,)
snake_case__ : List[str] = (('num_inference_steps', 50),)
def A__ ( self , **__lowerCAmelCase ):
"""simple docstring"""
lowercase = {"""num_train_timesteps""": 1000}
config.update(**__lowerCAmelCase )
return config
def A__ ( self , __lowerCAmelCase=0 , **__lowerCAmelCase ):
"""simple docstring"""
lowercase = dict(self.forward_default_kwargs )
lowercase = kwargs.pop("""num_inference_steps""" , __lowerCAmelCase )
lowercase = self.dummy_sample
lowercase = 0.1 * sample
lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config(**__lowerCAmelCase )
lowercase = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residuals
lowercase = dummy_past_residuals[:]
if time_step is None:
lowercase = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCAmelCase )
lowercase = scheduler_class.from_pretrained(__lowerCAmelCase )
new_scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residuals
lowercase = dummy_past_residuals[:]
lowercase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
lowercase = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
lowercase = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def A__ ( self ):
"""simple docstring"""
pass
def A__ ( self , __lowerCAmelCase=0 , **__lowerCAmelCase ):
"""simple docstring"""
lowercase = dict(self.forward_default_kwargs )
lowercase = kwargs.pop("""num_inference_steps""" , __lowerCAmelCase )
lowercase = self.dummy_sample
lowercase = 0.1 * sample
lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**__lowerCAmelCase )
scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residuals (must be after setting timesteps)
lowercase = dummy_past_residuals[:]
if time_step is None:
lowercase = scheduler.timesteps[len(scheduler.timesteps ) // 2]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__lowerCAmelCase )
lowercase = scheduler_class.from_pretrained(__lowerCAmelCase )
# copy over dummy past residuals
new_scheduler.set_timesteps(__lowerCAmelCase )
# copy over dummy past residual (must be after setting timesteps)
lowercase = dummy_past_residuals[:]
lowercase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
lowercase = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
lowercase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
lowercase = new_scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def A__ ( self , **__lowerCAmelCase ):
"""simple docstring"""
lowercase = self.scheduler_classes[0]
lowercase = self.get_scheduler_config(**__lowerCAmelCase )
lowercase = scheduler_class(**__lowerCAmelCase )
lowercase = 10
lowercase = self.dummy_model()
lowercase = self.dummy_sample_deter
scheduler.set_timesteps(__lowerCAmelCase )
for i, t in enumerate(scheduler.timesteps ):
lowercase = model(__lowerCAmelCase , __lowerCAmelCase )
lowercase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample
for i, t in enumerate(scheduler.timesteps ):
lowercase = model(__lowerCAmelCase , __lowerCAmelCase )
lowercase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ).prev_sample
return sample
def A__ ( self ):
"""simple docstring"""
lowercase = dict(self.forward_default_kwargs )
lowercase = kwargs.pop("""num_inference_steps""" , __lowerCAmelCase )
for scheduler_class in self.scheduler_classes:
lowercase = self.get_scheduler_config()
lowercase = scheduler_class(**__lowerCAmelCase )
lowercase = self.dummy_sample
lowercase = 0.1 * sample
if num_inference_steps is not None and hasattr(__lowerCAmelCase , """set_timesteps""" ):
scheduler.set_timesteps(__lowerCAmelCase )
elif num_inference_steps is not None and not hasattr(__lowerCAmelCase , """set_timesteps""" ):
lowercase = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
lowercase = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
lowercase = dummy_past_residuals[:]
lowercase = scheduler.timesteps[5]
lowercase = scheduler.timesteps[6]
lowercase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
lowercase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
lowercase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
lowercase = scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def A__ ( self ):
"""simple docstring"""
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=__lowerCAmelCase , time_step=__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=__lowerCAmelCase , time_step=__lowerCAmelCase )
def A__ ( self ):
"""simple docstring"""
lowercase = self.full_loop()
lowercase = torch.mean(torch.abs(__lowerCAmelCase ) )
assert abs(result_mean.item() - 254_0529 ) < 10
| 197 | 1 |
'''simple docstring'''
from __future__ import annotations
def _lowerCAmelCase (_lowercase , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
if (direction == 1 and array[indexa] > array[indexa]) or (
direction == 0 and array[indexa] < array[indexa]
):
a__ , a__ = array[indexa], array[indexa]
def _lowerCAmelCase (_lowercase , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
if length > 1:
a__ = int(length / 2 )
for i in range(_lowercase , low + middle ):
comp_and_swap(_lowercase , _lowercase , i + middle , _lowercase )
bitonic_merge(_lowercase , _lowercase , _lowercase , _lowercase )
bitonic_merge(_lowercase , low + middle , _lowercase , _lowercase )
def _lowerCAmelCase (_lowercase , _lowercase , _lowercase , _lowercase ):
"""simple docstring"""
if length > 1:
a__ = int(length / 2 )
bitonic_sort(_lowercase , _lowercase , _lowercase , 1 )
bitonic_sort(_lowercase , low + middle , _lowercase , 0 )
bitonic_merge(_lowercase , _lowercase , _lowercase , _lowercase )
if __name__ == "__main__":
UpperCamelCase_ : List[str] = input("""Enter numbers separated by a comma:\n""").strip()
UpperCamelCase_ : Tuple = [int(item.strip()) for item in user_input.split(""",""")]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print("""\nSorted array in ascending order is: """, end="""""")
print(*unsorted, sep=""", """)
bitonic_merge(unsorted, 0, len(unsorted), 0)
print("""Sorted array in descending order is: """, end="""""")
print(*unsorted, sep=""", """)
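# Note (added): this bitonic network is only guaranteed correct when the
# input length is a power of two; a minimal guard sketch:
def _demo_is_power_of_two(n: int) -> bool:
    return n > 0 and n & (n - 1) == 0

assert _demo_is_power_of_two(8) and not _demo_is_power_of_two(10)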
| 331 |
'''simple docstring'''
import json
import sys
def _lowerCAmelCase (_lowercase , _lowercase ):
"""simple docstring"""
with open(_lowercase , encoding="utf-8" ) as f:
a__ = json.load(_lowercase )
a__ = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
for benchmark_name in sorted(_lowercase ):
a__ = results[benchmark_name]
a__ = benchmark_name.split("/" )[-1]
output_md.append(F'### Benchmark: {benchmark_file_name}' )
a__ = "| metric |"
a__ = "|--------|"
a__ = "| new / old (diff) |"
for metric_name in sorted(_lowercase ):
a__ = benchmark_res[metric_name]
a__ = metric_vals["new"]
a__ = metric_vals.get("old" , _lowercase )
a__ = metric_vals.get("diff" , _lowercase )
a__ = F' {new_val:f}' if isinstance(_lowercase , (int, float) ) else "None"
if old_val is not None:
val_str += F' / {old_val:f}' if isinstance(_lowercase , (int, float) ) else "None"
if dif_val is not None:
val_str += F' ({dif_val:f})' if isinstance(_lowercase , (int, float) ) else "None"
title += " " + metric_name + " |"
lines += "---|"
value += val_str + " |"
output_md += [title, lines, value, " "]
output_md.append("</details>" )
with open(_lowercase , "w" , encoding="utf-8" ) as f:
f.writelines("\n".join(_lowercase ) )
if __name__ == "__main__":
UpperCamelCase_ : Dict = sys.argv[1]
UpperCamelCase_ : int = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
| 331 | 1 |
'''simple docstring'''
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def _snake_case ( __snake_case , __snake_case = True , __snake_case = math.inf , __snake_case = -math.inf , __snake_case = math.inf , __snake_case = -math.inf , __snake_case = False , __snake_case = 100 , __snake_case = 0.01 , __snake_case = 1 , ):
_UpperCamelCase = False
_UpperCamelCase = search_prob
_UpperCamelCase = start_temperate
_UpperCamelCase = []
_UpperCamelCase = 0
_UpperCamelCase = None
while not search_end:
_UpperCamelCase = current_state.score()
if best_state is None or current_score > best_state.score():
_UpperCamelCase = current_state
scores.append(SCREAMING_SNAKE_CASE_ )
iterations += 1
_UpperCamelCase = None
_UpperCamelCase = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
_UpperCamelCase = random.randint(0 , len(SCREAMING_SNAKE_CASE_ ) - 1 ) # picking a random neighbor
_UpperCamelCase = neighbors.pop(SCREAMING_SNAKE_CASE_ )
_UpperCamelCase = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
_UpperCamelCase = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
_UpperCamelCase = picked_neighbor
else:
_UpperCamelCase = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
_UpperCamelCase = picked_neighbor
_UpperCamelCase = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
_UpperCamelCase = True
else:
_UpperCamelCase = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )
plt.xlabel('''Iterations''' )
plt.ylabel('''Function values''' )
plt.show()
return best_state
if __name__ == "__main__":
def _snake_case ( __snake_case , __snake_case ):
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
_lowerCAmelCase = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
_lowerCAmelCase = simulated_annealing(
prob, find_max=False, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
f'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
)
# starting the problem with initial coordinates (12, 47)
_lowerCAmelCase = SearchProblem(x=12, y=47, step_size=1, function_to_optimize=test_fa)
_lowerCAmelCase = simulated_annealing(
prob, find_max=True, max_x=100, min_x=5, max_y=50, min_y=-5, visualization=True
)
print(
"The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 "
f'and 50 > y > - 5 found via hill climbing: {local_min.score()}'
)
def _snake_case ( __snake_case , __snake_case ):
return (3 * x**2) - (6 * y)
_lowerCAmelCase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
_lowerCAmelCase = simulated_annealing(prob, find_max=False, visualization=True)
print(
"The minimum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
f'{local_min.score()}'
)
_lowerCAmelCase = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
_lowerCAmelCase = simulated_annealing(prob, find_max=True, visualization=True)
print(
"The maximum score for f(x, y) = 3*x^2 - 6*y found via hill climbing: "
f'{local_min.score()}'
)
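# Minimal sketch (added) of the acceptance rule used in simulated_annealing
# above: a worsening move (change < 0) is still taken with probability
# e ** (change / current_temp), so exploration shrinks as the temperature drops.
import math as _demo_math

_demo_prob_hot = _demo_math.e ** (-2.0 / 100.0)  # ~0.98 at high temperature
_demo_prob_cold = _demo_math.e ** (-2.0 / 1.0)  # ~0.14 near the threshold
assert _demo_prob_cold < _demo_prob_hot < 1.0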
| 700 |
from sklearn.metrics import mean_squared_error
import datasets
_lowerCAmelCase = "\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n"
_lowerCAmelCase = "\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n"
_lowerCAmelCase = "\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n \"raw_values\" : Returns a full set of errors in case of multioutput input.\n\n \"uniform_average\" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric(\"mse\")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {'mse': 0.6123724356957945}\n\n If you're using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'mse': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mse': array([0.41666667, 1. ])}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
def UpperCamelCase_ ( self : Optional[int] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
] , )
def UpperCamelCase_ ( self : Dict ):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
def UpperCamelCase_ ( self : Any , _A : List[Any] , _A : List[str] , _A : Dict=None , _A : List[str]="uniform_average" , _A : int=True ):
_UpperCamelCase = mean_squared_error(
_A , _A , sample_weight=_A , multioutput=_A , squared=_A )
return {"mse": mse}
| 71 | 0 |
'''simple docstring'''
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
parser.add_argument(
"""--txt2img_unclip""",
default="""kakaobrain/karlo-v1-alpha""",
type=str,
required=False,
help="""The pretrained txt2img unclip.""",
)
__snake_case =parser.parse_args()
__snake_case =UnCLIPPipeline.from_pretrained(args.txtaimg_unclip)
__snake_case =CLIPImageProcessor()
__snake_case =CLIPVisionModelWithProjection.from_pretrained("""openai/clip-vit-large-patch14""")
__snake_case =UnCLIPImageVariationPipeline(
decoder=txtaimg.decoder,
text_encoder=txtaimg.text_encoder,
tokenizer=txtaimg.tokenizer,
text_proj=txtaimg.text_proj,
feature_extractor=feature_extractor,
image_encoder=image_encoder,
super_res_first=txtaimg.super_res_first,
super_res_last=txtaimg.super_res_last,
decoder_scheduler=txtaimg.decoder_scheduler,
super_res_scheduler=txtaimg.super_res_scheduler,
)
imgaimg.save_pretrained(args.dump_path)
| 133 |
'''simple docstring'''
from multiprocessing import Lock, Pipe, Process
# lock used to ensure that two processes do not access a pipe at the same time
process_lock = Lock()


def oe_process(position, value, l_send, r_send, lr_cv, rr_cv, result_pipe):
    global process_lock

    # we perform n swaps since after n swaps we know we are sorted
    # we *could* stop early if we are sorted already, but it takes as long to
    # find out we are sorted as it does to sort the list with this algorithm
    for i in range(0, 10):
        if (i + position) % 2 == 0 and r_send is not None:
            # send your value to your right neighbor
            process_lock.acquire()
            r_send[1].send(value)
            process_lock.release()

            # receive your right neighbor's value
            process_lock.acquire()
            temp = rr_cv[0].recv()
            process_lock.release()

            # take the lower value since you are on the left
            value = min(value, temp)
        elif (i + position) % 2 != 0 and l_send is not None:
            # send your value to your left neighbor
            process_lock.acquire()
            l_send[1].send(value)
            process_lock.release()

            # receive your left neighbor's value
            process_lock.acquire()
            temp = lr_cv[0].recv()
            process_lock.release()

            # take the higher value since you are on the right
            value = max(value, temp)
    # after all swaps are performed, send the values back to main
    result_pipe[1].send(value)


def odd_even_transposition(arr):
    process_array_ = []
    result_pipe = []
    # initialize the list of pipes where the values will be retrieved
    for _ in arr:
        result_pipe.append(Pipe())
    # creates the processes
    # the first and last process only have one neighbor so they are made outside
    # of the loop
    temp_rs = Pipe()
    temp_rr = Pipe()
    process_array_.append(
        Process(
            target=oe_process,
            args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]),
        )
    )
    temp_lr = temp_rs
    temp_ls = temp_rr

    for i in range(1, len(arr) - 1):
        temp_rs = Pipe()
        temp_rr = Pipe()
        process_array_.append(
            Process(
                target=oe_process,
                args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]),
            )
        )
        temp_lr = temp_rs
        temp_ls = temp_rr

    process_array_.append(
        Process(
            target=oe_process,
            args=(
                len(arr) - 1,
                arr[len(arr) - 1],
                temp_ls,
                None,
                temp_lr,
                None,
                result_pipe[len(arr) - 1],
            ),
        )
    )

    # start the processes
    for p in process_array_:
        p.start()

    # wait for the processes to end and write their values to the list
    for p in range(0, len(result_pipe)):
        arr[p] = result_pipe[p][0].recv()
        process_array_[p].join()
    return arr


def main():
    arr = list(range(10, 0, -1))
    print("Initial List")
    print(*arr)
    arr = odd_even_transposition(arr)
    print("Sorted List\n")
    print(*arr)


if __name__ == "__main__":
    main()
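
# For reference, a minimal single-process sketch of the same odd-even (brick)
# transposition idea, using plain in-place swaps instead of pipes and processes.
# `_odd_even_sequential` is a name introduced here purely for illustration; it is
# handy for sanity-checking the parallel version above.
def _odd_even_sequential(arr: list) -> list:
    for phase in range(len(arr)):
        # even phases compare pairs (0,1), (2,3), ...; odd phases (1,2), (3,4), ...
        for i in range(phase % 2, len(arr) - 1, 2):
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


assert _odd_even_sequential(list(range(10, 0, -1))) == list(range(1, 11))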
| 133 | 1 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")

        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)

        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file)
        tf_path = os.path.abspath(tf_checkpoint_path)

        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)

        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the folder to store the PyTorch model or dataset/vocab.",
)
parser.add_argument(
"--tf_checkpoint_path",
default="",
type=str,
help="An optional path to a TensorFlow checkpoint path to be converted.",
)
parser.add_argument(
"--transfo_xl_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--transfo_xl_dataset_file",
default="",
type=str,
help="An optional dataset file to be converted in a vocabulary.",
)
    args = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
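
# A hedged example of how this conversion script is typically invoked; the
# checkpoint and output paths below are hypothetical placeholders.
#
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./transfo-xl-wt103/model.ckpt \
#       --transfo_xl_config_file ./transfo-xl-wt103/config.json \
#       --pytorch_dump_folder_path ./transfo-xl-pytorch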
| 716 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
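
# A hedged usage sketch: loading the fast tokenizer from the Hub and encoding a
# sentence pair with CamemBERT's <s> ... </s></s> ... </s> layout. Commented out
# because instantiating it downloads files; the checkpoint name is the one
# referenced in the maps above.
#
#   tokenizer = CamembertTokenizerFast.from_pretrained("camembert-base")
#   encoded = tokenizer("J'aime le camembert !", "C'est délicieux.")
#   print(tokenizer.convert_ids_to_tokens(encoded["input_ids"]))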
| 106 | 0 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)


VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
    "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
    "tokenizer_config_file": {
        "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot-3B": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
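
# A quick, hedged illustration of the helpers above; `_byte_table` is a local
# name introduced here purely for this check. bytes_to_unicode() returns a
# reversible 256-entry byte -> printable-unicode map, and get_pairs() lists the
# adjacent symbol pairs that BPE considers for its next merge.
_byte_table = bytes_to_unicode()
assert len(_byte_table) == 256 and _byte_table[ord("a")] == "a"
assert get_pairs(("h", "e", "l", "l", "o")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}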
class BlenderbotTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        return token_ids_0 + [self.eos_token_id]

    def _build_conversation_input_ids(self, conversation):
        inputs = []
        for is_user, text in conversation.iter_texts():
            if is_user:
                # We need to space prefix as it's being done within blenderbot
                inputs.append(" " + text)
            else:
                # Generated responses should contain them already.
                inputs.append(text)

        full_string = " ".join(inputs)
        input_ids = self.encode(full_string)
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
            logger.warning(f"Trimmed input from conversation as it was longer than {self.model_max_length} tokens.")
        return input_ids
| 113 |
"""simple docstring"""
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class TextDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Text(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            **kwargs,
        )

    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
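
# A hedged usage sketch (commented out; the path below is a hypothetical
# placeholder): the reader wraps the `text` packaged builder and yields a
# dataset with a single "text" column, one line of the input file per example.
#
#   reader = TextDatasetReader("./my_corpus.txt")
#   dataset = reader.read()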
| 409 | 0 |
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable
def A ( A_ : Callable ):
@wraps(A_ )
def _inner_fn(*A_ : List[Any] , **A_ : Union[str, Any] ):
warnings.warn(
(F"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , A_ , )
return fn(*A_ , **A_ )
return _inner_fn
| 555 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse("0.8.3"):
raise Exception("requires gluonnlp == 0.8.3")
if version.parse(mx.__version__) != version.parse("1.5.0"):
raise Exception("requires mxnet == 1.5.0")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param

    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )

    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )

    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bort_checkpoint_path", default=None, type=str, required=True, help="Path the official Bort params file."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
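
# A hedged example invocation with hypothetical paths (the .params file would be
# a GluonNLP checkpoint from the official Bort release):
#
#   python convert_bort_original_gluonnlp_checkpoint_to_pytorch.py \
#       --bort_checkpoint_path ./bort.params \
#       --pytorch_dump_folder_path ./bort-pytorch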
| 555 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'caidas/swin2sr-classicalsr-x2-64': (
'https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'
),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=64,
        patch_size=1,
        num_channels=3,
        embed_dim=180,
        depths=[6, 6, 6, 6, 6, 6],
        num_heads=[6, 6, 6, 6, 6, 6],
        window_size=8,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        upscale=2,
        img_range=1.0,
        resi_connection="1conv",
        upsampler="pixelshuffle",
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.upscale = upscale
        self.img_range = img_range
        self.resi_connection = resi_connection
        self.upsampler = upsampler
| 25 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "ssube/stable-diffusion-x4-upscaler-onnx"

    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = torch.manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_pipeline_default_ddpm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((128, 128))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx", subfolder="scheduler"
        )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            "ssube/stable-diffusion-x4-upscaler-onnx",
            scheduler=lms_scheduler,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566]
        )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
| 329 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 707 |
'''simple docstring'''
def is_isogram(string: str) -> bool:
    if not all(x.isalpha() for x in string):
        raise ValueError("String must only contain alphabetic characters.")

    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
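
# Quick sanity checks of the function above (pure asserts, safe at import time):
# "machine" has no repeated letter, while "hello" repeats "l".
assert is_isogram("machine") is True
assert is_isogram("hello") is False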
if __name__ == "__main__":
    input_str = input("Enter a string ").strip()

    isogram = is_isogram(input_str)
print(f"""{input_str} is {'an' if isogram else 'not an'} isogram.""")
| 418 | 0 |
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.linear_model import LinearRegression
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
# Fitting Polynomial Regression to the dataset
from sklearn.preprocessing import PolynomialFeatures
# Importing the dataset
dataset = pd.read_csv(
    "https://s3.us-west-2.amazonaws.com/public.gamelab.fun/dataset/"
    "position_salaries.csv"
)
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)

poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(X)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, y)


# Visualizing the Polymonial Regression results
def viz_polymonial():
    plt.scatter(X, y, color="red")
    plt.plot(X, pol_reg.predict(poly_reg.fit_transform(X)), color="blue")
    plt.title("Truth or Bluff (Linear Regression)")
    plt.xlabel("Position level")
    plt.ylabel("Salary")
    plt.show()
if __name__ == "__main__":
viz_polymonial()
# Predicting a new result with Polymonial Regression
pol_reg.predict(poly_reg.fit_transform([[5.5]]))
# output should be 132148.43750003
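
# For intuition (a hedged aside, not part of the original example):
# PolynomialFeatures(degree=4) expands a single position level x into the row
# [1, x, x**2, x**3, x**4], so pol_reg above is an ordinary linear regression
# fitted over those powers. A minimal check of that expansion:
import numpy as np  # noqa: E402  (import placed here only for this check)

assert np.allclose(poly_reg.transform([[2.0]]), [[1.0, 2.0, 4.0, 8.0, 16.0]])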
| 412 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
"tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
| 18 | 0 |
def xnor_gate(input_1: int, input_2: int) -> int:
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 243 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class TextClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING

    if model_mapping is not None:
        model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
    if tf_model_mapping is not None:
        tf_model_mapping = {
            config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
        }

    @require_torch
    def test_small_model_pt(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="pt"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", top_k=2)
        self.assertEqual(
            nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]
        )

        outputs = text_classifier(["This is great !", "This is bad"], top_k=2)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier("This is great !", top_k=1)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        # Legacy behavior
        outputs = text_classifier("This is great !", return_all_scores=False)
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

        outputs = text_classifier("This is great !", return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs), [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]]
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=True)
        self.assertEqual(
            nested_simplify(outputs),
            [
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
                [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
            ],
        )

        outputs = text_classifier(["This is great !", "Something else"], return_all_scores=False)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"label": "LABEL_0", "score": 0.504},
                {"label": "LABEL_0", "score": 0.504},
            ],
        )

    @require_torch
    def test_accepts_torch_device(self):
        import torch

        text_classifier = pipeline(
            task="text-classification",
            model="hf-internal-testing/tiny-random-distilbert",
            framework="pt",
            device=torch.device("cpu"),
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @require_tf
    def test_small_model_tf(self):
        text_classifier = pipeline(
            task="text-classification", model="hf-internal-testing/tiny-random-distilbert", framework="tf"
        )

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "LABEL_0", "score": 0.504}])

    @slow
    @require_torch
    def test_pt_bert(self):
        text_classifier = pipeline("text-classification")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    @slow
    @require_tf
    def test_tf_bert(self):
        text_classifier = pipeline("text-classification", framework="tf")

        outputs = text_classifier("This is great !")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 1.0}])
        outputs = text_classifier("This is bad !")
        self.assertEqual(nested_simplify(outputs), [{"label": "NEGATIVE", "score": 1.0}])
        outputs = text_classifier("Birds are a type of animal")
        self.assertEqual(nested_simplify(outputs), [{"label": "POSITIVE", "score": 0.988}])

    def get_test_pipeline(self, model, tokenizer, processor):
        text_classifier = TextClassificationPipeline(model=model, tokenizer=tokenizer)
        return text_classifier, ["HuggingFace is in", "This is another test"]

    def run_pipeline_test(self, text_classifier, _):
        model = text_classifier.model
        # Small inputs because BartTokenizer tiny has maximum position embeddings = 22
        valid_inputs = "HuggingFace is in"
        outputs = text_classifier(valid_inputs)

        self.assertEqual(nested_simplify(outputs), [{"label": ANY(str), "score": ANY(float)}])
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())

        valid_inputs = ["HuggingFace is in ", "Paris is in France"]
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}, {"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
        self.assertTrue(outputs[1]["label"] in model.config.id2label.values())

        # Forcing to get all results with `top_k=None`
        # This is NOT the legacy format
        outputs = text_classifier(valid_inputs, top_k=None)
        N = len(model.config.id2label.values())
        self.assertEqual(
            nested_simplify(outputs),
            [[{"label": ANY(str), "score": ANY(float)}] * N, [{"label": ANY(str), "score": ANY(float)}] * N],
        )

        valid_inputs = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
        outputs = text_classifier(valid_inputs)
        self.assertEqual(
            nested_simplify(outputs),
            {"label": ANY(str), "score": ANY(float)},
        )
        self.assertTrue(outputs["label"] in model.config.id2label.values())

        # This might be used a text pair, but tokenizer + pipe interaction
        # makes it hard to understand that it's not using the pair properly
        # https://github.com/huggingface/transformers/issues/17305
        # We disabled this usage instead as it was outputting wrong outputs.
        invalid_input = [["HuggingFace is in ", "Paris is in France"]]
        with self.assertRaises(ValueError):
            text_classifier(invalid_input)

        # This used to be valid for doing text pairs
        # We're keeping it working because of backward compatibility
        outputs = text_classifier([[["HuggingFace is in ", "Paris is in France"]]])
        self.assertEqual(
            nested_simplify(outputs),
            [{"label": ANY(str), "score": ANY(float)}],
        )
        self.assertTrue(outputs[0]["label"] in model.config.id2label.values())
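
# A hedged sketch of exercising the pipeline outside the test harness; commented
# out because instantiating it downloads the tiny checkpoint used by the tests
# above.
#
#   classifier = pipeline(task="text-classification", model="hf-internal-testing/tiny-random-distilbert")
#   print(classifier("This is great !", top_k=2))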
| 243 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 152 |
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)
class a (_lowerCAmelCase ):
"""simple docstring"""
    def __init__( self : Optional[int] , *args : List[Any] , **kwargs : int ) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead." , FutureWarning , )
        super().__init__(*args , **kwargs )
| 81 | 0 |
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = '''PoolFormerConfig'''
# Base docstring
_CHECKPOINT_FOR_DOC = '''sail/poolformer_s12'''
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = '''sail/poolformer_s12'''
_IMAGE_CLASS_EXPECTED_OUTPUT = '''tabby, tabby cat'''
POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    '''sail/poolformer_s12''',
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path( input , drop_prob : float = 0.0 , training : bool = False ):
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape , dtype=input.dtype , device=input.device )
    random_tensor.floor_() # binarize
    output = input.div(keep_prob ) * random_tensor
    return output
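# Illustrative note (added for clarity, not part of the original module): stochastic
# depth drops entire residual branches per sample. A quick sanity check, assuming a
# plain CPU run:
#
#   x = torch.ones(4, 3)
#   out = drop_path(x, drop_prob=0.5, training=True)
#   # each row of `out` is either all zeros or all 2.0 (= 1 / keep_prob)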
class PoolFormerDropPath(nn.Module ):
    def __init__( self , drop_prob = None ) -> None:
        '''simple docstring'''
        super().__init__()
        self.drop_prob = drop_prob
    def forward( self , hidden_states ) -> torch.Tensor:
        '''simple docstring'''
        return drop_path(hidden_states , self.drop_prob , self.training )
    def extra_repr( self ) -> str:
        '''simple docstring'''
        return "p={}".format(self.drop_prob )
class PoolFormerEmbeddings(nn.Module ):
    def __init__( self , hidden_size , num_channels , patch_size , stride , padding , norm_layer=None ) -> None:
        '''simple docstring'''
        super().__init__()
        patch_size = patch_size if isinstance(patch_size , collections.abc.Iterable ) else (patch_size, patch_size)
        stride = stride if isinstance(stride , collections.abc.Iterable ) else (stride, stride)
        padding = padding if isinstance(padding , collections.abc.Iterable ) else (padding, padding)
        self.projection = nn.Conv2d(num_channels , hidden_size , kernel_size=patch_size , stride=stride , padding=padding )
        self.norm = norm_layer(hidden_size ) if norm_layer else nn.Identity()
    def forward( self , pixel_values ) -> int:
        '''simple docstring'''
        embeddings = self.projection(pixel_values )
        embeddings = self.norm(embeddings )
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm ):
    def __init__( self , num_channels , **kwargs ) -> None:
        '''simple docstring'''
        super().__init__(1 , num_channels , **kwargs )
class PoolFormerPooling(nn.Module ):
    def __init__( self , pool_size ) -> None:
        '''simple docstring'''
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size , stride=1 , padding=pool_size // 2 , count_include_pad=False )
    def forward( self , hidden_states ) -> List[Any]:
        '''simple docstring'''
        return self.pool(hidden_states ) - hidden_states
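# Illustrative note: PoolFormer's token mixer is plain average pooling; subtracting
# the input removes the identity component, so the block contributes Pool(x) - x
# and the residual connection in PoolFormerLayer adds x back, matching the
# formulation in the PoolFormer paper.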
class PoolFormerOutput(nn.Module ):
    def __init__( self , config , dropout_prob , hidden_size , intermediate_size ) -> None:
        '''simple docstring'''
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size , intermediate_size , 1 )
        self.conv2 = nn.Conv2d(intermediate_size , hidden_size , 1 )
        self.drop = PoolFormerDropPath(dropout_prob )
        if isinstance(config.hidden_act , str ):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act
    def forward( self , hidden_states ) -> Union[str, Any]:
        '''simple docstring'''
        hidden_states = self.conv1(hidden_states )
        hidden_states = self.act_fn(hidden_states )
        hidden_states = self.drop(hidden_states )
        hidden_states = self.conv2(hidden_states )
        hidden_states = self.drop(hidden_states )
        return hidden_states
class PoolFormerLayer(nn.Module ):
    def __init__( self , config , num_channels , pool_size , hidden_size , intermediate_size , drop_path ) -> None:
        '''simple docstring'''
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size )
        self.output = PoolFormerOutput(config , drop_path , hidden_size , intermediate_size )
        self.before_norm = PoolFormerGroupNorm(num_channels )
        self.after_norm = PoolFormerGroupNorm(num_channels )
        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path ) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels) ) , requires_grad=True )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels) ) , requires_grad=True )
    def forward( self , hidden_states ) -> List[str]:
        '''simple docstring'''
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states ) )
            scaled_op = self.layer_scale_1.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op )
            outputs = ()
            layer_output = self.output(self.after_norm(hidden_states ) )
            scaled_op = self.layer_scale_2.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op )
            outputs = (output,) + outputs
            return outputs
        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states ) ) )
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()
            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states ) ) )
            output = hidden_states + layer_output
            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module ):
    def __init__( self , config ) -> None:
        '''simple docstring'''
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0 , config.drop_path_rate , sum(config.depths ) )]
        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks ):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i] , stride=config.strides[i] , padding=config.padding[i] , num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] , hidden_size=config.hidden_sizes[i] , ) )
        self.patch_embeddings = nn.ModuleList(embeddings )
        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks ):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i] ):
                layers.append(
                    PoolFormerLayer(
                        config , num_channels=config.hidden_sizes[i] , pool_size=config.pool_size , hidden_size=config.hidden_sizes[i] , intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) , drop_path=dpr[cur + j] , ) )
            blocks.append(nn.ModuleList(layers ) )
        self.block = nn.ModuleList(blocks )
    def forward( self , pixel_values , output_hidden_states=False , return_dict=True ) -> Tuple:
        '''simple docstring'''
        all_hidden_states = () if output_hidden_states else None
        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings , self.block ) ):
            embedding_layer , blocks = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states )
            # Send the embeddings through the blocks
            for _, blk in enumerate(blocks ):
                layer_outputs = blk(hidden_states )
                hidden_states = layer_outputs[0]
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states , hidden_states=all_hidden_states )
class PoolFormerPreTrainedModel(PreTrainedModel ):
    config_class = PoolFormerConfig
    base_model_prefix = '''poolformer'''
    main_input_name = '''pixel_values'''
    supports_gradient_checkpointing = True
    def _init_weights( self , module ) -> List[str]:
        '''simple docstring'''
        if isinstance(module , (nn.Linear, nn.Conv2d) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module , nn.LayerNorm ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )
    def _set_gradient_checkpointing( self , module , value=False ) -> int:
        '''simple docstring'''
        if isinstance(module , PoolFormerEncoder ):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = r'''
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''
POOLFORMER_INPUTS_DOCSTRING = r'''
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`PoolFormerImageProcessor.__call__`] for details.
'''
@add_start_docstrings(
    '''The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.''' , POOLFORMER_START_DOCSTRING , )
class PoolFormerModel(PoolFormerPreTrainedModel ):
    def __init__( self , config ) -> str:
        '''simple docstring'''
        super().__init__(config )
        self.config = config
        self.encoder = PoolFormerEncoder(config )
        # Initialize weights and apply final processing
        self.post_init()
    def get_input_embeddings( self ) -> Tuple:
        '''simple docstring'''
        return self.embeddings.patch_embeddings
    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , modality="""vision""" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward( self , pixel_values = None , output_hidden_states = None , return_dict = None , ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        '''simple docstring'''
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        if pixel_values is None:
            raise ValueError("""You have to specify pixel_values""" )
        encoder_outputs = self.encoder(
            pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , )
        sequence_output = encoder_outputs[0]
        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]
        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output , hidden_states=encoder_outputs.hidden_states , )
class PoolFormerFinalPooler(nn.Module ):
    def __init__( self , config ) -> Optional[Any]:
        '''simple docstring'''
        super().__init__()
        self.dense = nn.Linear(config.hidden_size , config.hidden_size )
    def forward( self , hidden_states ) -> int:
        '''simple docstring'''
        output = self.dense(hidden_states )
        return output
@add_start_docstrings(
'''
PoolFormer Model transformer with an image classification head on top
    ''' , POOLFORMER_START_DOCSTRING , )
class PoolFormerForImageClassification(PoolFormerPreTrainedModel ):
    def __init__( self , config ) -> str:
        '''simple docstring'''
        super().__init__(config )
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config )
        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1] )
        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity()
        )
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values = None , labels = None , output_hidden_states = None , return_dict = None , ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        '''simple docstring'''
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.poolformer(
            pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , )
        sequence_output = outputs[0]
        logits = self.classifier(self.norm(sequence_output ).mean([-2, -1] ) )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = """regression"""
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = """single_label_classification"""
                else:
                    self.config.problem_type = """multi_label_classification"""
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
| 653 |
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main( ):
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
        begin_error_msg = """ """.join(str(e ).split(""" """ )[:-1] )
        full_error_msg = """"""
        depreciated_args = eval(str(e ).split(""" """ )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
            raise ValueError(full_error_msg )
    benchmark.run()
if __name__ == "__main__":
main()
| 653 | 1 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order ):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(F'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
        for row_idx, row in enumerate(partition ):
            expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()) )
    return expected_row_ids_and_row_dicts
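# Illustrative note: for a dataframe with partition_order=[1, 0], this helper yields
# ("1_0", row), ("1_1", row), ... followed by partition 0's rows, i.e. the exact
# order SparkExamplesIterable is expected to emit after shuffling or sharding.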
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16 )
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(10 ).repartition(2 )
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df , partition_order ) # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order )
    for i, (row_id, row_dict) in enumerate(generate_fn() ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(10 ).repartition(1 )
    it = SparkExamplesIterable(df )
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it ):
        assert row_id == F'''0_{i}'''
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(30 ).repartition(3 )
    # Mock the generator so that shuffle reverses the partition indices.
    with patch('''numpy.random.Generator''' ) as generator_mock:
        generator_mock.shuffle.side_effect = lambda x : x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [2, 1, 0] )
        shuffled_it = SparkExamplesIterable(df ).shuffle_data_sources(generator_mock )
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it ):
            expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(20 ).repartition(4 )
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df ).shard_data_sources(worker_id=0 , num_workers=2 )
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [0, 2] )
    for i, (row_id, row_dict) in enumerate(shard_it_1 ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df ).shard_data_sources(worker_id=1 , num_workers=2 )
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [1, 3] )
    for i, (row_id, row_dict) in enumerate(shard_it_2 ):
        expected_row_id , expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_partitions():
    spark = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1 )
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 39 |
'''simple docstring'''
def _modexpt( base , exponent , modulo_value ):
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base , exponent // 2 , modulo_value ) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base , exponent - 1 , modulo_value )) % modulo_value
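# Worked example (illustrative): _modexpt(3, 5, 7) computes 3**5 % 7 by repeated
# squaring -- 3**5 = 3 * (3**2)**2 -- touching exponents 5, 4, 2, 1 and returning
# 243 % 7 == 5 without ever materialising huge intermediate powers.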
def solution( base = 1_7_7_7 , height = 1_8_5_5 , digits = 8 ):
    result = base
    for _ in range(1 , height ):
        result = _modexpt(base , result , 1_0**digits )
    return result
if __name__ == "__main__":
print(f"""{solution() = }""") | 407 | 0 |
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
set_seed(770)
new_layer_name_dict = {
"""c_attn""": """att_proj""",
"""c_proj""": """out_proj""",
"""c_fc""": """in_proj""",
"""transformer.""": """""",
"""h.""": """layers.""",
"""ln_1""": """layernorm_1""",
"""ln_2""": """layernorm_2""",
"""ln_f""": """layernorm_final""",
"""wpe""": """position_embeds_layer""",
"""wte""": """input_embeds_layer""",
}
REMOTE_MODEL_PATHS = {
"""text_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text.pt""",
},
"""coarse_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse.pt""",
},
"""fine_small""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine.pt""",
},
"""text""": {
"""repo_id""": """suno/bark""",
"""file_name""": """text_2.pt""",
},
"""coarse""": {
"""repo_id""": """suno/bark""",
"""file_name""": """coarse_2.pt""",
},
"""fine""": {
"""repo_id""": """suno/bark""",
"""file_name""": """fine_2.pt""",
},
}
CUR_PATH = os.path.dirname(os.path.abspath(__file__))
default_cache_dir = os.path.join(os.path.expanduser("""~"""), """.cache""")
CACHE_DIR = os.path.join(os.getenv("""XDG_CACHE_HOME""", default_cache_dir), """suno""", """bark_v0""")
def _get_ckpt_path( model_type , use_small=False ):
    key = model_type
    if use_small:
        key += "_small"
    return os.path.join(CACHE_DIR , REMOTE_MODEL_PATHS[key]["file_name"] )
def _download( from_hf_path , file_name ):
    os.makedirs(CACHE_DIR , exist_ok=True )
    hf_hub_download(repo_id=from_hf_path , filename=file_name , local_dir=CACHE_DIR )
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase="text" ):
if model_type == "text":
A : Any = BarkSemanticModel
A : Dict = BarkSemanticConfig
A : Tuple = BarkSemanticGenerationConfig
elif model_type == "coarse":
A : Optional[int] = BarkCoarseModel
A : Union[str, Any] = BarkCoarseConfig
A : Optional[Any] = BarkCoarseGenerationConfig
elif model_type == "fine":
A : List[str] = BarkFineModel
A : Optional[Any] = BarkFineConfig
A : Any = BarkFineGenerationConfig
else:
raise NotImplementedError()
A : List[Any] = f"""{model_type}_small""" if use_small else model_type
A : Optional[int] = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(_lowerCamelCase ):
logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info["repo_id"] , model_info["file_name"] )
A : Optional[Any] = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )
# this is a hack
A : Any = checkpoint["model_args"]
if "input_vocab_size" not in model_args:
A : Union[str, Any] = model_args["vocab_size"]
A : Tuple = model_args["vocab_size"]
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
A : Optional[Any] = model_args.pop("n_head" )
A : Optional[Any] = model_args.pop("n_embd" )
A : Any = model_args.pop("n_layer" )
A : int = ConfigClass(**checkpoint["model_args"] )
A : Optional[Any] = ModelClass(config=_lowerCamelCase )
A : int = GenerationConfigClass()
A : Any = model_generation_config
A : Optional[int] = checkpoint["model"]
# fixup checkpoint
A : Dict = "_orig_mod."
for k, v in list(state_dict.items() ):
if k.startswith(_lowerCamelCase ):
# replace part of the key with corresponding layer name in HF implementation
A : List[Any] = k[len(_lowerCamelCase ) :]
for old_layer_name in new_layer_name_dict:
A : List[str] = new_k.replace(_lowerCamelCase , new_layer_name_dict[old_layer_name] )
A : Optional[int] = state_dict.pop(_lowerCamelCase )
A : Tuple = set(state_dict.keys() ) - set(model.state_dict().keys() )
A : str = {k for k in extra_keys if not k.endswith(".attn.bias" )}
A : str = set(model.state_dict().keys() ) - set(state_dict.keys() )
A : str = {k for k in missing_keys if not k.endswith(".attn.bias" )}
    if len(extra_keys ) != 0:
        raise ValueError(f"""extra keys found: {extra_keys}""" )
    if len(missing_keys ) != 0:
        raise ValueError(f"""missing keys: {missing_keys}""" )
    model.load_state_dict(state_dict , strict=False )
    n_params = model.num_parameters(exclude_embeddings=True )
    val_loss = checkpoint["best_val_loss"].item()
    logger.info(f"""model loaded: {round(n_params/1e6 , 1 )}M params, {round(val_loss , 3 )} loss""" )
    model.eval()
    model.to(device )
    del checkpoint, state_dict
    return model
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase=False , _lowerCamelCase="text" ):
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
A : Dict = "cpu" # do conversion on cpu
A : Tuple = _get_ckpt_path(_lowerCamelCase , use_small=_lowerCamelCase )
A : str = _load_model(_lowerCamelCase , _lowerCamelCase , model_type=_lowerCamelCase , use_small=_lowerCamelCase )
# load bark initial model
A : Optional[Any] = _bark_load_model(_lowerCamelCase , "cpu" , model_type=_lowerCamelCase , use_small=_lowerCamelCase )
if model_type == "text":
A : Tuple = bark_model["model"]
if model.num_parameters(exclude_embeddings=_lowerCamelCase ) != bark_model.get_num_params():
raise ValueError("initial and new models don't have the same number of parameters" )
# check if same output as the bark model
A : Union[str, Any] = 5
A : Tuple = 10
if model_type in ["text", "coarse"]:
A : int = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
A : int = bark_model(_lowerCamelCase )[0]
A : Dict = model(_lowerCamelCase )
# take last logits
A : Dict = output_new_model_total.logits[:, [-1], :]
else:
A : str = 3
A : Optional[int] = 8
A : List[str] = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
A : Optional[Any] = model(_lowerCamelCase , _lowerCamelCase )
A : Optional[int] = bark_model(_lowerCamelCase , _lowerCamelCase )
A : List[Any] = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError("initial and new outputs don't have the same shape" )
if (output_new_model - output_old_model).abs().max().item() > 1e-3:
raise ValueError("initial and new outputs are not equal" )
Path(_lowerCamelCase ).mkdir(exist_ok=_lowerCamelCase )
model.save_pretrained(_lowerCamelCase )
def load_whole_bark_model( semantic_path , coarse_path , fine_path , append_text , hub_path , folder_path , ):
    pytorch_dump_folder_path = os.path.join(folder_path , append_text )
    semanticConfig = BarkSemanticConfig.from_pretrained(os.path.join(semantic_path , "config.json" ) )
    coarseAcousticConfig = BarkCoarseConfig.from_pretrained(os.path.join(coarse_path , "config.json" ) )
    fineAcousticConfig = BarkFineConfig.from_pretrained(os.path.join(fine_path , "config.json" ) )
    codecConfig = EncodecConfig.from_pretrained("facebook/encodec_24khz" )
    semantic = BarkSemanticModel.from_pretrained(semantic_path )
    coarseAcoustic = BarkCoarseModel.from_pretrained(coarse_path )
    fineAcoustic = BarkFineModel.from_pretrained(fine_path )
    codec = EncodecModel.from_pretrained("facebook/encodec_24khz" )
    bark_config = BarkConfig.from_sub_model_configs(
        semanticConfig , coarseAcousticConfig , fineAcousticConfig , codecConfig )
    bark_generation_config = BarkGenerationConfig.from_sub_model_configs(
        semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
    bark = BarkModel(bark_config )
    bark.semantic = semantic
    bark.coarse_acoustics = coarseAcoustic
    bark.fine_acoustics = fineAcoustic
    bark.codec_model = codec
    bark.generation_config = bark_generation_config
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    bark.save_pretrained(pytorch_dump_folder_path , repo_id=hub_path , push_to_hub=True )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""model_type""", type=str, help="""text, coarse or fine.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--is_small""", action="""store_true""", help="""convert the small version instead of the large.""")
    args = parser.parse_args()
    load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 708 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester :
'''simple docstring'''
def __init__( self : Tuple , __lowerCamelCase : int , __lowerCamelCase : Tuple=13 , __lowerCamelCase : List[str]=30 , __lowerCamelCase : Tuple=2 , __lowerCamelCase : Any=3 , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[int]=True , __lowerCamelCase : int=32 , __lowerCamelCase : List[str]=5 , __lowerCamelCase : Any=4 , __lowerCamelCase : Optional[int]=37 , __lowerCamelCase : List[Any]="gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : Union[str, Any]=0.1 , __lowerCamelCase : Optional[Any]=10 , __lowerCamelCase : Dict=0.02 , __lowerCamelCase : Tuple=3 , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : List[Any]=2 , ) -> str:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> int:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
def SCREAMING_SNAKE_CASE__ ( self : str ) -> Dict:
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__lowerCamelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
    def SCREAMING_SNAKE_CASE__ ( self : int , config , pixel_values , labels ) -> int:
        model = DeiTModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def SCREAMING_SNAKE_CASE__ ( self : List[Any] , config , pixel_values , labels ) -> Any:
        model = DeiTForMaskedImageModeling(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
        self.parent.assertEqual(
            result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
    def SCREAMING_SNAKE_CASE__ ( self : Any , config , pixel_values , labels ) -> Dict:
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config )
        model.to(torch_device )
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
a__ = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
a__ = (
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
a__ = False
a__ = False
a__ = False
    def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[int]:
        self.model_tester = DeiTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DeiTConfig , has_text_modality=False , hidden_size=37 )
def SCREAMING_SNAKE_CASE__ ( self : Dict ) -> Dict:
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> int:
pass
    def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def SCREAMING_SNAKE_CASE__ ( self : Any ) -> List[str]:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Dict:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs )
    def SCREAMING_SNAKE_CASE__ ( self : int ) -> Optional[Any]:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def SCREAMING_SNAKE_CASE__ ( self : Tuple , inputs_dict , model_class , return_labels=False ) -> str:
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )
        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]
        return inputs_dict
    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Optional[int]:
        if not self.model_tester.is_training:
            return
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING )
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Dict:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return
        config.use_cache = False
        config.return_dict = True
        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config )
            model.gradient_checkpointing_enable()
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
    def SCREAMING_SNAKE_CASE__ ( self : str ) -> Any:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]
        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            for problem_type in problem_types:
                with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]
                    model = model_class(config )
                    model.to(torch_device )
                    model.train()
                    inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"] )
                    # This tests that we do not trigger the warning form PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something in wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True ) as warning_list:
                        loss = model(**inputs ).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message ):
                            raise ValueError(
                                F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
                    loss.backward()
@slow
    def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str:
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase ):
'''simple docstring'''
@cached_property
def SCREAMING_SNAKE_CASE__ ( self : Tuple ) -> List[Any]:
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
    def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Optional[int]:
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 10_00) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Optional[Any]:
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.float16 , device_map="auto" )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" )
        pixel_values = inputs.pixel_values.to(torch_device )
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values )
| 17 | 0 |
def hexagonal_numbers( length : int ) -> list[int]:
    """simple docstring"""
    if length <= 0 or not isinstance(length , int ):
        raise ValueError('Length must be a positive integer.' )
    return [n * (2 * n - 1) for n in range(length )]
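# Hexagonal numbers follow h(n) = n * (2n - 1), so the first five terms are
# [0, 1, 6, 15, 28]. A quick illustrative check:
#
#   assert hexagonal_numbers(5) == [0, 1, 6, 15, 28]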
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10))
| 105 |
"""simple docstring"""
def bubble_sort( list_data : list , length : int = 0 ) -> list:
    length = length or len(list_data )
    swapped = False
    for i in range(length - 1 ):
        if list_data[i] > list_data[i + 1]:
            list_data[i + 1], list_data[i] = list_data[i], list_data[i + 1]
            swapped = True
    return list_data if not swapped else bubble_sort(list_data , length - 1 )
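# Illustrative usage: each pass bubbles the largest remaining element toward the
# end, and the recursion stops early once a full pass makes no swap.
#
#   assert bubble_sort([3, 1, 2]) == [1, 2, 3]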
if __name__ == "__main__":
import doctest
doctest.testmod()
| 308 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched( videos ) -> List[List[ImageInput]]:
    """simple docstring"""
    if isinstance(videos , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
        return videos
    elif isinstance(videos , (list, tuple) ) and is_valid_image(videos[0] ):
        return [videos]
    elif is_valid_image(videos ):
        return [[videos]]
    raise ValueError(f'Could not make batched video from {videos}' )
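# Illustrative note: make_batched normalises every accepted input shape to a list
# of videos, each a list of frames -- a single image becomes [[image]] and a list
# of frames becomes [frames] -- so the preprocessing loop below can always iterate
# `for video in videos: for frame in video: ...`.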
class VivitImageProcessor(BaseImageProcessor ):
    model_input_names = ['pixel_values']
    def __init__( self , do_resize = True , size = None , resample = PILImageResampling.BILINEAR , do_center_crop = True , crop_size = None , do_rescale = True , rescale_factor = 1 / 255 , offset = True , do_normalize = True , image_mean = None , image_std = None , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image , size , resample = PILImageResampling.BILINEAR , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image , size["shortest_edge"] , default_to_square=False )
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
        return resize(image , size=output_size , resample=resample , data_format=data_format , **kwargs )
    def center_crop( self , image , size , data_format = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size )
        if "height" not in size or "width" not in size:
            raise ValueError(f'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
        return center_crop(image , size=(size["height"], size["width"]) , data_format=data_format , **kwargs )
    def rescale( self , image , scale , offset = True , data_format = None , **kwargs , ) -> Union[str, Any]:
        image = image.astype(np.float32 )
        if offset:
            image = image - (scale / 2)
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image , mean , std , data_format = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def _preprocess_image( self , image , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , offset = None , do_normalize = None , image_mean = None , image_std = None , data_format = ChannelDimension.FIRST , ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True." )
        # All transformations expect numpy arrays.
        image = to_numpy_array(image )
        if do_resize:
            image = self.resize(image=image , size=size , resample=resample )
        if do_center_crop:
            image = self.center_crop(image , size=crop_size )
        if do_rescale:
            image = self.rescale(image=image , scale=rescale_factor , offset=offset )
        if do_normalize:
            image = self.normalize(image=image , mean=image_mean , std=image_std )
        image = to_channel_dimension_format(image , data_format )
        return image
    def preprocess( self , videos , do_resize = None , size = None , resample = None , do_center_crop = None , crop_size = None , do_rescale = None , rescale_factor = None , offset = None , do_normalize = None , image_mean = None , image_std = None , return_tensors = None , data_format = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size , param_name="crop_size" )
        if not valid_images(videos ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        videos = make_batched(videos )
        videos = [
            [
                self._preprocess_image(
                    image=img , do_resize=do_resize , size=size , resample=resample , do_center_crop=do_center_crop , crop_size=crop_size , do_rescale=do_rescale , rescale_factor=rescale_factor , offset=offset , do_normalize=do_normalize , image_mean=image_mean , image_std=image_std , data_format=data_format , )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data , tensor_type=return_tensors )
| 274 |
"""simple docstring"""
def one_pence( ) -> int:
    """simple docstring"""
    return 1
def two_pence( x: int ) -> int:
    """simple docstring"""
    return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def five_pence( x: int ) -> int:
    """simple docstring"""
    return 0 if x < 0 else five_pence(x - 5 ) + two_pence(x )
def ten_pence( x: int ) -> int:
    """simple docstring"""
    return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(x )
def twenty_pence( x: int ) -> int:
    """simple docstring"""
    return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(x )
def fifty_pence( x: int ) -> int:
    """simple docstring"""
    return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(x )
def one_pound( x: int ) -> int:
    """simple docstring"""
    return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(x )
def two_pound( x: int ) -> int:
    """simple docstring"""
    return 0 if x < 0 else two_pound(x - 200 ) + one_pound(x )
def solution( x: int = 200 ) -> int:
    """simple docstring"""
    return two_pound(x )
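# Worked example (illustrative): solution(3) counts the ways to make 3p from
# {1p, 2p} coins -- 1+1+1 and 1+2 -- giving 2. With the default of 200p and all
# eight coins this yields 73682, the classic Project Euler 31 answer.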
if __name__ == "__main__":
print(solution(int(input().strip())))
| 274 | 1 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform( number_of_qubits = 3 ):
    """simple docstring"""
    if isinstance(number_of_qubits, str ):
        raise TypeError('''number of qubits must be a integer.''' )
    if number_of_qubits <= 0:
        raise ValueError('''number of qubits must be > 0.''' )
    if math.floor(number_of_qubits ) != number_of_qubits:
        raise ValueError('''number of qubits must be exact integer.''' )
    if number_of_qubits > 1_0:
        raise ValueError('''number of qubits too large to simulate(>10).''' )
    qr = QuantumRegister(number_of_qubits, '''qr''' )
    cr = ClassicalRegister(number_of_qubits, '''cr''' )
    quantum_circuit = QuantumCircuit(qr, cr )
    counter = number_of_qubits
    for i in range(counter ):
        quantum_circuit.h(number_of_qubits - i - 1 )
        counter -= 1
        for j in range(counter ):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter )
    for k in range(number_of_qubits // 2 ):
        quantum_circuit.swap(k, number_of_qubits - k - 1 )
    # measure all the qubits
    quantum_circuit.measure(qr, cr )
    # simulate with 10000 shots
    backend = Aer.get_backend('''qasm_simulator''' )
    job = execute(quantum_circuit, backend, shots=1_0_0_0_0 )
    return job.result().get_counts(quantum_circuit )
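# Illustrative note: measuring the QFT of |000> yields a near-uniform distribution
# over all 2**n basis states, so for the default 3 qubits each of the 8 outcomes
# should appear roughly 1250 times out of 10000 shots, up to sampling noise.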
if __name__ == "__main__":
print(
f"Total count for quantum fourier transform state is: \\n {quantum_fourier_transform(3)}"
)
| 0 |
import string
def atbash_slow( sequence: str )-> str:
    output = ''
    for i in sequence:
        extract = ord(i )
        if 65 <= extract <= 90:
            output += chr(1_55 - extract )
        elif 97 <= extract <= 1_22:
            output += chr(2_19 - extract )
        else:
            output += i
    return output
def atbash( sequence: str )-> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c )] if c in letters else c for c in sequence )
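# Illustrative check: Atbash maps A<->Z, B<->Y, ..., so
#   assert atbash("ABCDEFGH") == "ZYXWVUTS"
# and the cipher is an involution: atbash(atbash(s)) == s.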
def benchmark( )-> None:
    from timeit import timeit
    print('Running performance benchmarks...' )
    setup = 'from string import printable ; from __main__ import atbash, atbash_slow'
    print(F"""> atbash_slow(): {timeit('atbash_slow(printable)' , setup=setup )} seconds""" )
    print(F"""> atbash(): {timeit('atbash(printable)' , setup=setup )} seconds""" )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(F"""{example} encrypted in atbash: {atbash(example)}""")
benchmark()
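Atbash is an involution: encrypting twice restores the plaintext, which gives a
cheap cross-check between the two implementations (a sketch):

# >>> atbash("ABCDEFGH")
# 'ZYXWVUTS'
# >>> atbash(atbash("with space")) == "with space"
# True
# >>> atbash_slow("testStringtest") == atbash("testStringtest")
# True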
| 411 | 0 |
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCAmelCase__ ( lowerCamelCase__ ) -> List[str]:
return getitem, k
def lowerCAmelCase__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Tuple:
return setitem, k, v
def lowerCAmelCase__ ( lowerCamelCase__ ) -> List[str]:
return delitem, k
def lowerCAmelCase__ ( lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ ) -> List[str]:
try:
return fun(snake_case_ , *snake_case_ ), None
except Exception as e:
return None, e
A = (
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
)
A = [
_set('key_a', 'val_a'),
_set('key_a', 'val_b'),
]
A = [
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
_del('key_a'),
_del('key_b'),
_set('key_a', 'val_a'),
_del('key_a'),
]
A = [
_get('key_a'),
_del('key_a'),
_set('key_a', 'val_a'),
_del('key_a'),
_del('key_a'),
_get('key_a'),
]
A = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
A = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
'operations' , (
pytest.param(_add_items , id='add items' ),
pytest.param(_overwrite_items , id='overwrite items' ),
pytest.param(_delete_items , id='delete items' ),
pytest.param(_access_absent_items , id='access absent items' ),
pytest.param(_add_with_resize_up , id='add with resize up' ),
pytest.param(_add_with_resize_down , id='add with resize down' ),
) , )
def lowerCAmelCase__ ( lowerCamelCase__ ) -> List[Any]:
A = HashMap(initial_block_size=4 )
A = {}
for _, (fun, *args) in enumerate(snake_case_ ):
A = _run_operation(snake_case_ , snake_case_ , *snake_case_ )
A = _run_operation(snake_case_ , snake_case_ , *snake_case_ )
assert my_res == py_res
assert str(snake_case_ ) == str(snake_case_ )
assert set(snake_case_ ) == set(snake_case_ )
assert len(snake_case_ ) == len(snake_case_ )
assert set(my.items() ) == set(py.items() )
def lowerCAmelCase__ ( ) -> int:
def is_public(lowerCamelCase__ ) -> bool:
return not name.startswith('_' )
A = {name for name in dir({} ) if is_public(snake_case_ )}
A = {name for name in dir(HashMap() ) if is_public(snake_case_ )}
assert dict_public_names > hash_public_names
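How the operation tuples drive the parametrized test (illustrative, using only
the helpers defined above, applied to a plain dict):

fun, *args = _set("key_a", "val_a")           # (setitem, "key_a", "val_a")
res, exc = _run_operation({}, fun, *args)
assert res is None and exc is None            # setitem succeeds, returns None
res, exc = _run_operation({}, *_get("missing"))
assert isinstance(exc, KeyError)              # the exception is captured, not raised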
| 703 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A = logging.get_logger(__name__)
A = {
'google/fnet-base': 'https://huggingface.co/google/fnet-base/resolve/main/config.json',
'google/fnet-large': 'https://huggingface.co/google/fnet-large/resolve/main/config.json'
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class UpperCAmelCase__ ( UpperCamelCase ):
lowerCAmelCase_ : List[Any] = """fnet"""
def __init__( self : Tuple , snake_case : Optional[int]=32_000 , snake_case : Optional[Any]=768 , snake_case : Union[str, Any]=12 , snake_case : List[str]=3_072 , snake_case : str="gelu_new" , snake_case : Any=0.1 , snake_case : List[str]=512 , snake_case : Any=4 , snake_case : str=0.02 , snake_case : Union[str, Any]=1E-1_2 , snake_case : Optional[Any]=False , snake_case : List[Any]=512 , snake_case : int=3 , snake_case : Any=1 , snake_case : Union[str, Any]=2 , **snake_case : Union[str, Any] , ) -> Dict:
'''simple docstring'''
super().__init__(pad_token_id=snake_case , bos_token_id=snake_case , eos_token_id=snake_case , **snake_case )
A = vocab_size
A = max_position_embeddings
A = hidden_size
A = num_hidden_layers
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = initializer_range
A = type_vocab_size
A = layer_norm_eps
A = use_tpu_fourier_optimizations
A = tpu_short_seq_length
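Typical usage, relying only on the defaults defined above (a sketch):

config = FNetConfig()                        # fnet-base style defaults
assert config.hidden_size == 768
small = FNetConfig(num_hidden_layers=4)      # any field can be overridden
assert small.num_hidden_layers == 4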
| 109 | 0 |