| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (length 86 to 54.5k) | int64 (0 to 371) | string (length 87 to 49.2k) | int64 (0 to 349) | int64 (0 to 1) |
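The table above is the column schema for the rows that follow: each row pairs a `code` snippet and a `style_context` snippet with what appear to be style ids and a binary `label`. Below is a minimal sketch of iterating rows with this schema via the Hugging Face `datasets` library; the repository id is a placeholder, since the actual dataset name is not given here.

```python
# Sketch only: "<dataset-repo-id>" is a placeholder, not the real dataset name.
from datasets import load_dataset

rows = load_dataset("<dataset-repo-id>", split="train")  # hypothetical path

for row in rows.select(range(3)):
    # Each row carries two code snippets, their style ids, and a 0/1 label.
    print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
    print(row["code"][:80])  # preview the first characters of the snippet
```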
'''simple docstring'''
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase : Dict = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Any = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
UpperCamelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 345 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Find a root of func (a string in terms of x), starting from the point a."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
# Find value of e (root of log(x) - 1 = 0)
print(f'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
| 345 | 1 |
'''simple docstring'''
from collections import UserDict
from typing import Union
import numpy as np
import requests
from ..utils import (
add_end_docstrings,
logging,
)
from .audio_classification import ffmpeg_read
from .base import PIPELINE_INIT_ARGS, Pipeline
UpperCamelCase : List[str] = logging.get_logger(__name__)
@add_end_docstrings(a_ )
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Optional[int] , **UpperCAmelCase_ : Dict):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
if self.framework != "pt":
raise ValueError(f"""The {self.__class__} is only available in PyTorch.""")
# No specific FOR_XXX available yet
def __call__( self : List[Any] , UpperCAmelCase_ : Union[np.ndarray, bytes, str] , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
return super().__call__(UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
a : Optional[Any] = {}
if "candidate_labels" in kwargs:
a : Union[str, Any] = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
a : List[str] = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Tuple="This is a sound of {}."):
"""simple docstring"""
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
if audio.startswith('http://') or audio.startswith('https://'):
# We need to actually check for a real protocol, otherwise it's impossible to use a local file
# like http_huggingface_co.png
a : Tuple = requests.get(UpperCAmelCase_).content
else:
with open(UpperCAmelCase_ , 'rb') as f:
a : str = f.read()
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
a : Tuple = ffmpeg_read(UpperCAmelCase_ , self.feature_extractor.sampling_rate)
if not isinstance(UpperCAmelCase_ , np.ndarray):
raise ValueError('We expect a numpy ndarray as input')
if len(audio.shape) != 1:
raise ValueError('We expect a single channel audio input for ZeroShotAudioClassificationPipeline')
a : Union[str, Any] = self.feature_extractor(
[audio] , sampling_rate=self.feature_extractor.sampling_rate , return_tensors='pt')
a : Optional[Any] = candidate_labels
a : List[str] = [hypothesis_template.format(UpperCAmelCase_) for x in candidate_labels]
a : Tuple = self.tokenizer(UpperCAmelCase_ , return_tensors=self.framework , padding=UpperCAmelCase_)
a : List[str] = [text_inputs]
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : List[str]):
"""simple docstring"""
a : List[str] = model_inputs.pop('candidate_labels')
a : str = model_inputs.pop('text_inputs')
if isinstance(text_inputs[0] , UpperCAmelCase_):
a : int = text_inputs[0]
else:
# Batching case.
a : Optional[Any] = text_inputs[0][0]
a : Tuple = self.model(**UpperCAmelCase_ , **UpperCAmelCase_)
a : int = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_audio,
}
return model_outputs
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : Any = model_outputs.pop('candidate_labels')
a : List[str] = model_outputs['logits'][0]
if self.framework == "pt":
a : Optional[Any] = logits.softmax(dim=0)
a : Optional[Any] = probs.tolist()
else:
raise ValueError('`tf` framework not supported.')
a : Union[str, Any] = [
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(UpperCAmelCase_ , UpperCAmelCase_) , key=lambda UpperCAmelCase_: -x[0])
]
return result
| 345 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int]=1_3 , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Optional[Any]=9_9 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : int=3_2 , UpperCAmelCase_ : Tuple=5 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Optional[int]=5_1_2 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : int="last" , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : List[str]=0 , ):
"""simple docstring"""
a : Tuple = parent
a : Optional[Any] = batch_size
a : Tuple = seq_length
a : Union[str, Any] = is_training
a : List[str] = use_input_lengths
a : Union[str, Any] = use_token_type_ids
a : Optional[int] = use_labels
a : int = gelu_activation
a : Dict = sinusoidal_embeddings
a : Any = causal
a : Optional[int] = asm
a : int = n_langs
a : List[str] = vocab_size
a : List[str] = n_special
a : List[str] = hidden_size
a : Any = num_hidden_layers
a : Union[str, Any] = num_attention_heads
a : Optional[Any] = hidden_dropout_prob
a : str = attention_probs_dropout_prob
a : Dict = max_position_embeddings
a : Union[str, Any] = type_sequence_label_size
a : str = initializer_range
a : List[Any] = num_labels
a : Union[str, Any] = num_choices
a : Optional[Any] = summary_type
a : Optional[Any] = use_proj
a : Optional[Any] = scope
a : Dict = bos_token_id
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
a : Optional[int] = None
if self.use_input_lengths:
a : Optional[int] = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
a : int = None
if self.use_token_type_ids:
a : str = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
a : Optional[Any] = None
a : Tuple = None
a : Optional[Any] = None
if self.use_labels:
a : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a : Optional[Any] = ids_tensor([self.batch_size] , 2).float()
a : Dict = ids_tensor([self.batch_size] , self.num_choices)
a : Any = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : Any = XLMModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_ , lengths=UpperCAmelCase_ , langs=UpperCAmelCase_)
a : str = model(UpperCAmelCase_ , langs=UpperCAmelCase_)
a : int = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
a : Optional[Any] = XLMWithLMHeadModel(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : Union[str, Any] = XLMForQuestionAnsweringSimple(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = model(UpperCAmelCase_)
a : Tuple = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_)
a : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Any = XLMForQuestionAnswering(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_)
a : Dict = model(
UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , p_mask=UpperCAmelCase_ , )
a : int = model(
UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , )
((a) , ) : Union[str, Any] = result_with_labels.to_tuple()
a : int = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_)
((a) , ) : Union[str, Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , ):
"""simple docstring"""
a : Dict = XLMForSequenceClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_)
a : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Dict = self.num_labels
a : int = XLMForTokenClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , ):
"""simple docstring"""
a : str = self.num_choices
a : Dict = XLMForMultipleChoice(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : str = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Optional[int] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Any = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : int = self.prepare_config_and_inputs()
(
(
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) ,
) : Union[str, Any] = config_and_inputs
a : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : int = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
A : List[str] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
A : Optional[Any] = (
{
"feature-extraction": XLMModel,
"fill-mask": XLMWithLMHeadModel,
"question-answering": XLMForQuestionAnsweringSimple,
"text-classification": XLMForSequenceClassification,
"text-generation": XLMWithLMHeadModel,
"token-classification": XLMForTokenClassification,
"zero-shot": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict=False):
"""simple docstring"""
a : List[Any] = super()._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
a : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
a : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[Any] = XLMModelTester(self)
a : Tuple = ConfigTester(self , config_class=UpperCAmelCase_ , emb_dim=3_7)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=1):
"""simple docstring"""
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(
[isinstance(UpperCAmelCase_ , UpperCAmelCase_) for iter_attentions in attentions] , [True] * len(UpperCAmelCase_))
self.assertEqual(len(UpperCAmelCase_) , (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(UpperCAmelCase_):
# adds PAD dummy token
a : List[str] = min_length + idx + 1
a : Optional[Any] = min_length + idx + 1
a : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Union[str, Any]=1):
"""simple docstring"""
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(
[isinstance(UpperCAmelCase_ , UpperCAmelCase_) for iter_hidden_states in hidden_states] , [True] * len(UpperCAmelCase_) , )
self.assertEqual(len(UpperCAmelCase_) , (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(UpperCAmelCase_):
# adds PAD dummy token
a : int = min_length + idx + 1
a : Any = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(UpperCAmelCase_) , )
pass
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Union[str, Any] = XLMModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
@require_torch
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Dict = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
model.to(UpperCAmelCase_)
a : List[Any] = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=UpperCAmelCase_) # the president
a : Dict = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
a : Optional[Any] = model.generate(UpperCAmelCase_ , do_sample=UpperCAmelCase_)
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , UpperCAmelCase_)
| 345 | 1 |
'''simple docstring'''
def binary_or(a: int, b: int) -> str:
    """
    Take two non-negative integers and return the binary representation of their bitwise OR.

    >>> binary_or(25, 32)
    '0b111001'
    """
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive')
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int('1' in (char_a, char_b)))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 345 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase : List[str] = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Any = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[int] = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 345 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : Any = {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json""",
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Dict = "blenderbot-small"
A : Union[str, Any] = ["past_key_values"]
A : List[str] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : Any , UpperCAmelCase_ : List[Any]=5_0_2_6_5 , UpperCAmelCase_ : Any=5_1_2 , UpperCAmelCase_ : Optional[Any]=8 , UpperCAmelCase_ : Tuple=2_0_4_8 , UpperCAmelCase_ : Optional[int]=1_6 , UpperCAmelCase_ : str=8 , UpperCAmelCase_ : Optional[int]=2_0_4_8 , UpperCAmelCase_ : Tuple=1_6 , UpperCAmelCase_ : Any=0.0 , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Any=5_1_2 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : int=0.0 , UpperCAmelCase_ : List[Any]=0.02 , UpperCAmelCase_ : List[Any]=1 , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : List[str]=1 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Union[str, Any]=2 , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
a : List[Any] = vocab_size
a : Optional[Any] = max_position_embeddings
a : Optional[Any] = d_model
a : Dict = encoder_ffn_dim
a : List[Any] = encoder_layers
a : Dict = encoder_attention_heads
a : Optional[int] = decoder_ffn_dim
a : Optional[int] = decoder_layers
a : Tuple = decoder_attention_heads
a : List[str] = dropout
a : List[Any] = attention_dropout
a : Dict = activation_dropout
a : Tuple = activation_function
a : Union[str, Any] = init_std
a : Tuple = encoder_layerdrop
a : List[str] = decoder_layerdrop
a : Optional[int] = use_cache
a : str = encoder_layers
a : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , is_encoder_decoder=UpperCAmelCase_ , decoder_start_token_id=UpperCAmelCase_ , forced_eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , )
class UpperCamelCase ( a_ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
a : Union[str, Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
])
if self.use_past:
a : Any = {0: 'batch'}
a : Union[str, Any] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
a : Optional[Any] = {0: 'batch', 1: 'decoder_sequence'}
a : Union[str, Any] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(UpperCAmelCase_ , direction='inputs')
elif self.task == "causal-lm":
# TODO: figure this case out.
a : Optional[Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
])
if self.use_past:
a , a : Any = self.num_layers
for i in range(UpperCAmelCase_):
a : Tuple = {0: 'batch', 2: 'past_sequence + sequence'}
a : List[Any] = {0: 'batch', 2: 'past_sequence + sequence'}
else:
a : str = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
])
return common_inputs
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
a : Dict = super().outputs
else:
a : Union[str, Any] = super(UpperCAmelCase_ , self).outputs
if self.use_past:
a , a : Optional[Any] = self.num_layers
for i in range(UpperCAmelCase_):
a : Optional[int] = {0: 'batch', 2: 'past_sequence + sequence'}
a : Dict = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : PreTrainedTokenizer , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[TensorType] = None , ):
"""simple docstring"""
a : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
# Generate decoder inputs
a : Optional[Any] = seq_length if not self.use_past else 1
a : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
a : Optional[int] = {f"""decoder_{name}""": tensor for name, tensor in decoder_inputs.items()}
a : Union[str, Any] = dict(**UpperCAmelCase_ , **UpperCAmelCase_)
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
a , a : int = common_inputs['input_ids'].shape
a : Optional[int] = common_inputs['decoder_input_ids'].shape[1]
a , a : Tuple = self.num_attention_heads
a : Optional[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
a : List[Any] = decoder_seq_length + 3
a : Optional[int] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
a : Dict = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(UpperCAmelCase_ , UpperCAmelCase_)] , dim=1)
a : Union[str, Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
a , a : List[Any] = self.num_layers
a : Union[str, Any] = min(UpperCAmelCase_ , UpperCAmelCase_)
a : Optional[int] = max(UpperCAmelCase_ , UpperCAmelCase_) - min_num_layers
a : Tuple = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(UpperCAmelCase_):
common_inputs["past_key_values"].append(
(
torch.zeros(UpperCAmelCase_),
torch.zeros(UpperCAmelCase_),
torch.zeros(UpperCAmelCase_),
torch.zeros(UpperCAmelCase_),
))
# TODO: test this.
a : Dict = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(UpperCAmelCase_ , UpperCAmelCase_):
common_inputs["past_key_values"].append((torch.zeros(UpperCAmelCase_), torch.zeros(UpperCAmelCase_)))
return common_inputs
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : PreTrainedTokenizer , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[TensorType] = None , ):
"""simple docstring"""
a : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
a , a : int = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
a : Any = seqlen + 2
a , a : Optional[int] = self.num_layers
a , a : int = self.num_attention_heads
a : List[str] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
a : Dict = common_inputs['attention_mask'].dtype
a : Tuple = torch.cat(
[common_inputs['attention_mask'], torch.ones(UpperCAmelCase_ , UpperCAmelCase_ , dtype=UpperCAmelCase_)] , dim=1)
a : Dict = [
(torch.zeros(UpperCAmelCase_), torch.zeros(UpperCAmelCase_)) for _ in range(UpperCAmelCase_)
]
return common_inputs
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : PreTrainedTokenizer , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[TensorType] = None , ):
"""simple docstring"""
a : Optional[int] = compute_effective_axis_dimension(
UpperCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
a : List[str] = tokenizer.num_special_tokens_to_add(UpperCAmelCase_)
a : Any = compute_effective_axis_dimension(
UpperCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCAmelCase_)
# Generate dummy inputs according to compute batch and sequence
a : Optional[Any] = [' '.join([tokenizer.unk_token]) * seq_length] * batch_size
a : List[str] = dict(tokenizer(UpperCAmelCase_ , return_tensors=UpperCAmelCase_))
return common_inputs
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : PreTrainedTokenizer , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[TensorType] = None , ):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
a : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
UpperCAmelCase_ , batch_size=UpperCAmelCase_ , seq_length=UpperCAmelCase_ , is_pair=UpperCAmelCase_ , framework=UpperCAmelCase_)
elif self.task == "causal-lm":
a : str = self._generate_dummy_inputs_for_causal_lm(
UpperCAmelCase_ , batch_size=UpperCAmelCase_ , seq_length=UpperCAmelCase_ , is_pair=UpperCAmelCase_ , framework=UpperCAmelCase_)
else:
a : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
UpperCAmelCase_ , batch_size=UpperCAmelCase_ , seq_length=UpperCAmelCase_ , is_pair=UpperCAmelCase_ , framework=UpperCAmelCase_)
return common_inputs
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple):
"""simple docstring"""
if self.task in ["default", "seq2seq-lm"]:
a : Optional[Any] = super()._flatten_past_key_values_(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
else:
a : Any = super(UpperCAmelCase_ , self)._flatten_past_key_values_(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
| 345 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase : Tuple = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Union[str, Any] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
UpperCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 345 | 1 |
'''simple docstring'''
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
UpperCamelCase : str = {
"""facebook/encodec_24khz""": """https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json""",
"""facebook/encodec_48khz""": """https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json""",
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Tuple = "encodec"
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Dict=[1.5, 3.0, 6.0, 12.0, 24.0] , UpperCAmelCase_ : int=2_4_0_0_0 , UpperCAmelCase_ : List[Any]=1 , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Tuple=1_2_8 , UpperCAmelCase_ : List[Any]=3_2 , UpperCAmelCase_ : List[Any]=1 , UpperCAmelCase_ : Optional[Any]=[8, 5, 4, 2] , UpperCAmelCase_ : str="weight_norm" , UpperCAmelCase_ : List[str]=7 , UpperCAmelCase_ : Any=7 , UpperCAmelCase_ : Optional[Any]=3 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : int="reflect" , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Dict=1.0 , UpperCAmelCase_ : Dict=1_0_2_4 , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : Any=True , **UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
a : List[Any] = target_bandwidths
a : str = sampling_rate
a : int = audio_channels
a : str = normalize
a : str = chunk_length_s
a : Tuple = overlap
a : Optional[int] = hidden_size
a : Union[str, Any] = num_filters
a : List[str] = num_residual_layers
a : Union[str, Any] = upsampling_ratios
a : str = norm_type
a : List[str] = kernel_size
a : Union[str, Any] = last_kernel_size
a : List[Any] = residual_kernel_size
a : Any = dilation_growth_rate
a : Any = use_causal_conv
a : List[str] = pad_mode
a : Optional[int] = compress
a : List[Any] = num_lstm_layers
a : Optional[Any] = trim_right_ratio
a : Any = codebook_size
a : Any = codebook_dim if codebook_dim is not None else hidden_size
a : Dict = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""")
super().__init__(**UpperCAmelCase_)
@property
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate)
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length))
@property
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : Optional[int] = np.prod(self.upsampling_ratios)
return math.ceil(self.sampling_rate / hop_length)
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
return int(1_0_0_0 * self.target_bandwidths[-1] // (self.frame_rate * 1_0))
| 345 |
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : str = logging.get_logger(__name__)
UpperCamelCase : List[str] = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Optional[int] = "data2vec-audio"
def __init__( self : Dict , UpperCAmelCase_ : Optional[int]=3_2 , UpperCAmelCase_ : Union[str, Any]=7_6_8 , UpperCAmelCase_ : Dict=1_2 , UpperCAmelCase_ : str=1_2 , UpperCAmelCase_ : Optional[Any]=3_0_7_2 , UpperCAmelCase_ : Dict="gelu" , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : str=1e-5 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Optional[Any]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCAmelCase_ : Dict=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_ : int=(1_0, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : List[Any]=1_6 , UpperCAmelCase_ : Optional[Any]=1_9 , UpperCAmelCase_ : int=5 , UpperCAmelCase_ : Any=0.05 , UpperCAmelCase_ : Dict=1_0 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : Tuple=1_0 , UpperCAmelCase_ : int=0 , UpperCAmelCase_ : Any="sum" , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[int]=2_5_6 , UpperCAmelCase_ : Any=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCAmelCase_ : Optional[Any]=(5, 3, 3, 1, 1) , UpperCAmelCase_ : Optional[int]=(1, 2, 3, 1, 1) , UpperCAmelCase_ : int=5_1_2 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : Tuple=1 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : List[Any]=None , **UpperCAmelCase_ : int , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_)
a : List[Any] = hidden_size
a : Any = feat_extract_activation
a : Any = list(UpperCAmelCase_)
a : Optional[int] = list(UpperCAmelCase_)
a : Dict = list(UpperCAmelCase_)
a : Tuple = conv_bias
a : str = num_conv_pos_embeddings
a : Dict = num_conv_pos_embedding_groups
a : Optional[Any] = conv_pos_kernel_size
a : Any = len(self.conv_dim)
a : Tuple = num_hidden_layers
a : Any = intermediate_size
a : Any = hidden_act
a : Dict = num_attention_heads
a : Dict = hidden_dropout
a : Union[str, Any] = attention_dropout
a : Dict = activation_dropout
a : Optional[int] = feat_proj_dropout
a : Tuple = final_dropout
a : Union[str, Any] = layerdrop
a : Tuple = layer_norm_eps
a : Dict = initializer_range
a : Tuple = vocab_size
a : int = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a : List[str] = mask_time_prob
a : int = mask_time_length
a : Optional[int] = mask_time_min_masks
a : Dict = mask_feature_prob
a : List[str] = mask_feature_length
a : str = mask_feature_min_masks
# ctc loss
a : str = ctc_loss_reduction
a : Optional[Any] = ctc_zero_infinity
# adapter
a : List[str] = add_adapter
a : Optional[Any] = adapter_kernel_size
a : int = adapter_stride
a : str = num_adapter_layers
a : Optional[Any] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
a : str = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
a : List[Any] = list(UpperCAmelCase_)
a : List[str] = list(UpperCAmelCase_)
a : str = list(UpperCAmelCase_)
a : Optional[Any] = xvector_output_dim
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
return math.prod(self.conv_stride)
| 345 | 1 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : List[str] = logging.get_logger(__name__)
# TODO Update this
UpperCamelCase : Any = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Dict = "esm"
def __init__( self : str , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Optional[Any]=7_6_8 , UpperCAmelCase_ : Dict=1_2 , UpperCAmelCase_ : Optional[int]=1_2 , UpperCAmelCase_ : List[Any]=3_0_7_2 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Optional[Any]=1_0_2_6 , UpperCAmelCase_ : Tuple=0.02 , UpperCAmelCase_ : int=1e-12 , UpperCAmelCase_ : int="absolute" , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Tuple=False , UpperCAmelCase_ : Union[str, Any]=None , UpperCAmelCase_ : Dict=None , **UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCAmelCase_ , mask_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
a : int = vocab_size
a : Dict = hidden_size
a : Tuple = num_hidden_layers
a : Dict = num_attention_heads
a : Dict = intermediate_size
a : List[Any] = hidden_dropout_prob
a : int = attention_probs_dropout_prob
a : int = max_position_embeddings
a : List[Any] = initializer_range
a : Tuple = layer_norm_eps
a : Dict = position_embedding_type
a : Optional[Any] = use_cache
a : List[str] = emb_layer_norm_before
a : Optional[Any] = token_dropout
a : str = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info('No esmfold_config supplied for folding model, using default values.')
a : Optional[int] = EsmFoldConfig()
elif isinstance(UpperCAmelCase_ , UpperCAmelCase_):
a : Optional[int] = EsmFoldConfig(**UpperCAmelCase_)
a : Any = esmfold_config
if vocab_list is None:
logger.warning('No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!')
a : int = get_default_vocab_list()
else:
a : List[Any] = vocab_list
else:
a : Optional[Any] = None
a : Union[str, Any] = None
if self.esmfold_config is not None and getattr(self.esmfold_config , 'use_esm_attn_map' , UpperCAmelCase_):
raise ValueError('The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!')
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : List[Any] = super().to_dict()
if isinstance(self.esmfold_config , UpperCAmelCase_):
a : int = self.esmfold_config.to_dict()
return output
@dataclass
class UpperCamelCase :
"""simple docstring"""
A : str = None
A : bool = True
A : bool = False
A : bool = False
A : bool = False
A : float = 0
A : bool = True
A : bool = False
A : int = 128
A : "TrunkConfig" = None
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
if self.trunk is None:
a : Any = TrunkConfig()
elif isinstance(self.trunk , UpperCAmelCase_):
a : Dict = TrunkConfig(**self.trunk)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : str = asdict(self)
a : List[Any] = self.trunk.to_dict()
return output
@dataclass
class UpperCamelCase :
"""simple docstring"""
A : int = 48
A : int = 1024
A : int = 128
A : int = 32
A : int = 32
A : int = 32
A : float = 0
A : float = 0
A : bool = False
A : int = 4
A : Optional[int] = 128
A : "StructureModuleConfig" = None
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
if self.structure_module is None:
a : Optional[Any] = StructureModuleConfig()
elif isinstance(self.structure_module , UpperCAmelCase_):
a : Tuple = StructureModuleConfig(**self.structure_module)
if self.max_recycles <= 0:
raise ValueError(f"""`max_recycles` should be positive, got {self.max_recycles}.""")
if self.sequence_state_dim % self.sequence_state_dim != 0:
raise ValueError(
'`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got'
f""" {self.sequence_state_dim} and {self.sequence_state_dim}.""")
if self.pairwise_state_dim % self.pairwise_state_dim != 0:
raise ValueError(
'`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got'
f""" {self.pairwise_state_dim} and {self.pairwise_state_dim}.""")
a : str = self.sequence_state_dim // self.sequence_head_width
a : int = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
'`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got'
f""" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.""")
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
'`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got'
f""" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.""")
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"""`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.""")
if self.dropout >= 0.4:
raise ValueError(f"""`dropout` should not be greater than 0.4, got {self.dropout}.""")
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : List[Any] = asdict(self)
a : Dict = self.structure_module.to_dict()
return output
@dataclass
class UpperCamelCase :
"""simple docstring"""
A : int = 384
A : int = 128
A : int = 16
A : int = 128
A : int = 12
A : int = 4
A : int = 8
A : float = 0.1
A : int = 8
A : int = 1
A : int = 2
A : int = 7
A : int = 10
A : float = 1E-8
A : float = 1E5
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
return asdict(self)
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
| 345 |
'''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
UpperCamelCase : Optional[Any] = logging.getLogger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Tuple = "masked_bert"
def __init__( self : Tuple , UpperCAmelCase_ : List[Any]=3_0_5_2_2 , UpperCAmelCase_ : str=7_6_8 , UpperCAmelCase_ : Optional[Any]=1_2 , UpperCAmelCase_ : Optional[int]=1_2 , UpperCAmelCase_ : Union[str, Any]=3_0_7_2 , UpperCAmelCase_ : Union[str, Any]="gelu" , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Optional[int]=5_1_2 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : Optional[Any]=1e-12 , UpperCAmelCase_ : Dict=0 , UpperCAmelCase_ : Dict="topK" , UpperCAmelCase_ : str="constant" , UpperCAmelCase_ : Optional[Any]=0.0 , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
a : Union[str, Any] = vocab_size
a : List[Any] = hidden_size
a : List[str] = num_hidden_layers
a : Any = num_attention_heads
a : Optional[Any] = hidden_act
a : str = intermediate_size
a : Dict = hidden_dropout_prob
a : Any = attention_probs_dropout_prob
a : Any = max_position_embeddings
a : Dict = type_vocab_size
a : List[str] = initializer_range
a : int = layer_norm_eps
a : Dict = pruning_method
a : List[str] = mask_init
a : Union[str, Any] = mask_scale
| 345 | 1 |
'''simple docstring'''
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
"""simple docstring"""
a : Union[str, Any] = {
'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
}
a : Optional[int] = Dataset.from_dict(snake_case )
return dataset
class UpperCamelCase ( a_ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Optional[int] = get_dataset()
a : Dict = make_duplicate_clusters(UpperCAmelCase_ , 0.85)
self.assertEqual(len(duplicate_clusters[0]) , 2)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : Tuple = get_dataset()
a , a : List[str] = deduplicate_dataset(UpperCAmelCase_)
self.assertEqual(len(UpperCAmelCase_) , 2)
print(UpperCAmelCase_)
self.assertEqual(duplicate_clusters[0][0]['copies'] , 2)
self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , UpperCAmelCase_)
| 345 |
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Any):
"""simple docstring"""
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_)
a : str = {}
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Tuple , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : int):
"""simple docstring"""
a : Dict = super().add_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
if num_added_tokens == 0:
raise ValueError(
f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
' `placeholder_token` that is not already in the tokenizer.')
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , *UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str]=1 , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
a : Any = []
if num_vec_per_token == 1:
self.try_adding_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
output.append(UpperCAmelCase_)
else:
a : int = []
for i in range(UpperCAmelCase_):
a : Union[str, Any] = placeholder_token + f"""_{i}"""
self.try_adding_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
output.append(UpperCAmelCase_)
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f"""The tokenizer already has placeholder token {token} that can get confused with"""
f""" {placeholder_token}keep placeholder tokens independent""")
a : Any = output
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : str=1.0):
"""simple docstring"""
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
a : Any = []
for i in range(len(UpperCAmelCase_)):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=UpperCAmelCase_))
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
a : List[Any] = self.token_map[placeholder_token]
a : int = tokens[: 1 + int(len(UpperCAmelCase_) * prop_tokens_to_load)]
if vector_shuffle:
a : List[Any] = copy.copy(UpperCAmelCase_)
random.shuffle(UpperCAmelCase_)
a : List[str] = text.replace(UpperCAmelCase_ , ' '.join(UpperCAmelCase_))
return text
def __call__( self : Optional[int] , UpperCAmelCase_ : Any , *UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Optional[int]=1.0 , **UpperCAmelCase_ : str):
"""simple docstring"""
return super().__call__(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase_ , vector_shuffle=UpperCAmelCase_ , prop_tokens_to_load=UpperCAmelCase_) , *UpperCAmelCase_ , **UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , *UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=1.0 , **UpperCAmelCase_ : Dict):
"""simple docstring"""
return super().encode(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase_ , vector_shuffle=UpperCAmelCase_ , prop_tokens_to_load=UpperCAmelCase_) , *UpperCAmelCase_ , **UpperCAmelCase_ , )
| 345 | 1 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Union[str, Any]=0.9_99 , snake_case : Tuple="cosine" , ) -> str:
"""simple docstring"""
if alpha_transform_type == "cosine":
def alpha_bar_fn(snake_case : str ):
return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2
elif alpha_transform_type == "exp":
def alpha_bar_fn(snake_case : Dict ):
return math.exp(t * -12.0 )
else:
raise ValueError(F"""Unsupported alpha_tranform_type: {alpha_transform_type}""" )
a : Optional[Any] = []
for i in range(snake_case ):
a : Optional[int] = i / num_diffusion_timesteps
a : Dict = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar_fn(snake_case ) / alpha_bar_fn(snake_case ) , snake_case ) )
return torch.tensor(snake_case , dtype=torch.floataa )
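# Note (editorial, illustrative): the helper above discretizes a continuous alpha-bar schedule
# into per-step betas via beta_i = 1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), clipped at the
# max_beta argument, so that the cumulative product of (1 - beta_i) tracks the chosen schedule.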
class UpperCamelCase ( a_ , a_ ):
"""simple docstring"""
A : str = [e.name for e in KarrasDiffusionSchedulers]
A : Union[str, Any] = 2
@register_to_config
def __init__( self : Any , UpperCAmelCase_ : int = 1_0_0_0 , UpperCAmelCase_ : float = 0.0_00_85 , UpperCAmelCase_ : float = 0.0_12 , UpperCAmelCase_ : str = "linear" , UpperCAmelCase_ : Optional[Union[np.ndarray, List[float]]] = None , UpperCAmelCase_ : str = "epsilon" , UpperCAmelCase_ : Optional[bool] = False , UpperCAmelCase_ : Optional[bool] = False , UpperCAmelCase_ : float = 1.0 , UpperCAmelCase_ : str = "linspace" , UpperCAmelCase_ : int = 0 , ):
"""simple docstring"""
if trained_betas is not None:
a : List[Any] = torch.tensor(UpperCAmelCase_ , dtype=torch.floataa)
elif beta_schedule == "linear":
a : Tuple = torch.linspace(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , dtype=torch.floataa)
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
a : str = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , UpperCAmelCase_ , dtype=torch.floataa) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
a : Tuple = betas_for_alpha_bar(UpperCAmelCase_ , alpha_transform_type='cosine')
elif beta_schedule == "exp":
a : List[Any] = betas_for_alpha_bar(UpperCAmelCase_ , alpha_transform_type='exp')
else:
raise NotImplementedError(f"""{beta_schedule} does is not implemented for {self.__class__}""")
a : int = 1.0 - self.betas
a : Any = torch.cumprod(self.alphas , dim=0)
# set all values
self.set_timesteps(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
a : Tuple = use_karras_sigmas
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : int=None):
"""simple docstring"""
if schedule_timesteps is None:
a : List[str] = self.timesteps
a : Optional[int] = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter) == 0:
a : Any = 1 if len(UpperCAmelCase_) > 1 else 0
else:
a : Dict = timestep.cpu().item() if torch.is_tensor(UpperCAmelCase_) else timestep
a : Any = self._index_counter[timestep_int]
return indices[pos].item()
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : Union[float, torch.FloatTensor] , ):
"""simple docstring"""
a : Any = self.index_for_timestep(UpperCAmelCase_)
a : Optional[int] = self.sigmas[step_index]
a : int = sample / ((sigma**2 + 1) ** 0.5)
return sample
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, torch.device] = None , UpperCAmelCase_ : Optional[int] = None , ):
"""simple docstring"""
a : Any = num_inference_steps
a : Optional[Any] = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
a : int = np.linspace(0 , num_train_timesteps - 1 , UpperCAmelCase_ , dtype=UpperCAmelCase_)[::-1].copy()
elif self.config.timestep_spacing == "leading":
a : Dict = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
a : Optional[Any] = (np.arange(0 , UpperCAmelCase_) * step_ratio).round()[::-1].copy().astype(UpperCAmelCase_)
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
a : Any = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
a : Dict = (np.arange(UpperCAmelCase_ , 0 , -step_ratio)).round().copy().astype(UpperCAmelCase_)
timesteps -= 1
else:
raise ValueError(
f"""{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""")
a : List[Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
a : str = np.log(UpperCAmelCase_)
a : Any = np.interp(UpperCAmelCase_ , np.arange(0 , len(UpperCAmelCase_)) , UpperCAmelCase_)
if self.config.use_karras_sigmas:
a : Optional[int] = self._convert_to_karras(in_sigmas=UpperCAmelCase_ , num_inference_steps=self.num_inference_steps)
a : List[str] = np.array([self._sigma_to_t(UpperCAmelCase_ , UpperCAmelCase_) for sigma in sigmas])
a : List[Any] = np.concatenate([sigmas, [0.0]]).astype(np.floataa)
a : str = torch.from_numpy(UpperCAmelCase_).to(device=UpperCAmelCase_)
a : Optional[int] = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])
a : str = torch.from_numpy(UpperCAmelCase_)
a : Optional[int] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])
if str(UpperCAmelCase_).startswith('mps'):
# mps does not support float64
a : Optional[Any] = timesteps.to(UpperCAmelCase_ , dtype=torch.floataa)
else:
a : List[str] = timesteps.to(device=UpperCAmelCase_)
# empty dt and derivative
a : Tuple = None
a : List[Any] = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
a : Optional[Any] = defaultdict(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
a : str = np.log(UpperCAmelCase_)
# get distribution
a : int = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
a : Union[str, Any] = np.cumsum((dists >= 0) , axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
a : Union[str, Any] = low_idx + 1
a : Tuple = log_sigmas[low_idx]
a : str = log_sigmas[high_idx]
# interpolate sigmas
a : List[Any] = (low - log_sigma) / (low - high)
a : str = np.clip(UpperCAmelCase_ , 0 , 1)
# transform interpolation to time range
a : str = (1 - w) * low_idx + w * high_idx
a : List[Any] = t.reshape(sigma.shape)
return t
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : float = in_sigmas[-1].item()
a : float = in_sigmas[0].item()
a : int = 7.0 # 7.0 is the value used in the paper
a : Any = np.linspace(0 , 1 , UpperCAmelCase_)
a : List[Any] = sigma_min ** (1 / rho)
a : Any = sigma_max ** (1 / rho)
a : str = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
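    # Note (editorial, illustrative): this is the rho-schedule from Karras et al. (2022),
    # sigma_i = (sigma_max^(1/rho) + ramp_i * (sigma_min^(1/rho) - sigma_max^(1/rho)))^rho
    # with rho = 7 and ramp_i going linearly from 0 to 1, which spaces the noise levels
    # more densely near sigma_min.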
@property
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
return self.dt is None
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : Union[torch.FloatTensor, np.ndarray] , UpperCAmelCase_ : Union[float, torch.FloatTensor] , UpperCAmelCase_ : Union[torch.FloatTensor, np.ndarray] , UpperCAmelCase_ : bool = True , ):
"""simple docstring"""
a : Optional[int] = self.index_for_timestep(UpperCAmelCase_)
# advance index counter by 1
a : Optional[Any] = timestep.cpu().item() if torch.is_tensor(UpperCAmelCase_) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
a : Tuple = self.sigmas[step_index]
a : Dict = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
a : List[str] = self.sigmas[step_index - 1]
a : Optional[Any] = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
a : Any = 0
a : Optional[Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
a : Any = sigma_hat if self.state_in_first_order else sigma_next
a : str = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
a : List[str] = sigma_hat if self.state_in_first_order else sigma_next
a : Tuple = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
a : Optional[int] = model_output
else:
raise ValueError(
f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""")
if self.config.clip_sample:
a : Any = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range)
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
a : Any = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
a : Any = sigma_next - sigma_hat
# store for 2nd order step
a : str = derivative
a : Any = dt
a : Optional[int] = sample
else:
# 2. 2nd order / Heun's method
a : str = (sample - pred_original_sample) / sigma_next
a : List[str] = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
a : str = self.dt
a : Optional[int] = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
a : Optional[Any] = None
a : Optional[Any] = None
a : List[str] = None
a : int = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=UpperCAmelCase_)
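    # Note (editorial, illustrative): `step` alternates between two phases. In the first-order
    # phase it takes an Euler step and stashes the derivative, dt and sample; in the second
    # phase it recomputes the derivative at the predicted point, averages the two (Heun's
    # method), and clears the stashed state so the next call starts a fresh first-order step.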
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : torch.FloatTensor , ):
"""simple docstring"""
a : int = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype)
if original_samples.device.type == "mps" and torch.is_floating_point(UpperCAmelCase_):
# mps does not support float64
a : Dict = self.timesteps.to(original_samples.device , dtype=torch.floataa)
a : Union[str, Any] = timesteps.to(original_samples.device , dtype=torch.floataa)
else:
a : Dict = self.timesteps.to(original_samples.device)
a : Optional[Any] = timesteps.to(original_samples.device)
a : Dict = [self.index_for_timestep(UpperCAmelCase_ , UpperCAmelCase_) for t in timesteps]
a : List[Any] = sigmas[step_indices].flatten()
while len(sigma.shape) < len(original_samples.shape):
a : Optional[Any] = sigma.unsqueeze(-1)
a : Tuple = original_samples + noise * sigma
return noisy_samples
def __len__( self : Optional[int]):
"""simple docstring"""
return self.config.num_train_timesteps
| 345 | '''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> Optional[Any]:
"""simple docstring"""
a : Union[str, Any] = SwinConfig()
a : Optional[int] = swin_name.split('_' )
a : Union[str, Any] = name_split[1]
a : Dict = int(name_split[4] )
a : Union[str, Any] = int(name_split[3][-1] )
if model_size == "tiny":
a : Optional[Any] = 96
a : Any = (2, 2, 6, 2)
a : List[str] = (3, 6, 12, 24)
elif model_size == "small":
a : int = 96
a : List[str] = (2, 2, 18, 2)
a : int = (3, 6, 12, 24)
elif model_size == "base":
a : Tuple = 128
a : Optional[int] = (2, 2, 18, 2)
a : List[Any] = (4, 8, 16, 32)
else:
a : Dict = 192
a : str = (2, 2, 18, 2)
a : List[Any] = (6, 12, 24, 48)
if "in22k" in swin_name:
a : Any = 21_841
else:
a : str = 1_000
a : str = 'huggingface/label-files'
a : Optional[Any] = 'imagenet-1k-id2label.json'
a : Dict = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='dataset' ) , 'r' ) )
a : Tuple = {int(snake_case ): v for k, v in idalabel.items()}
a : int = idalabel
a : str = {v: k for k, v in idalabel.items()}
a : Dict = img_size
a : List[Any] = num_classes
a : str = embed_dim
a : Dict = depths
a : Union[str, Any] = num_heads
a : int = window_size
return config
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
if "patch_embed.proj" in name:
a : int = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
a : Tuple = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
a : Optional[int] = 'encoder.' + name
if "attn.proj" in name:
a : List[Any] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
a : Tuple = name.replace('attn' , 'attention.self' )
if "norm1" in name:
a : Optional[int] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
a : Dict = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
a : Union[str, Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
a : Any = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
a : Union[str, Any] = 'layernorm.weight'
if name == "norm.bias":
a : List[str] = 'layernorm.bias'
if "head" in name:
a : Union[str, Any] = name.replace('head' , 'classifier' )
else:
a : List[Any] = 'swin.' + name
return name
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple ) -> List[str]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
a : Any = orig_state_dict.pop(snake_case )
if "mask" in key:
continue
elif "qkv" in key:
a : Optional[Any] = key.split('.' )
a : Dict = int(key_split[1] )
a : Optional[int] = int(key_split[3] )
a : Tuple = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
a : Optional[Any] = val[:dim, :]
a : List[Any] = val[
dim : dim * 2, :
]
a : List[Any] = val[-dim:, :]
else:
a : Dict = val[
:dim
]
a : Union[str, Any] = val[
dim : dim * 2
]
a : Union[str, Any] = val[
-dim:
]
else:
a : Dict = val
return orig_state_dict
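# Note (editorial, illustrative): timm stores the attention projection as a single fused "qkv"
# tensor of shape (3 * dim, dim) for weights and (3 * dim,) for biases; the loop above slices
# it into equal thirds corresponding to the separate query, key and value parameters.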
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : Dict ) -> List[str]:
"""simple docstring"""
a : Any = timm.create_model(snake_case , pretrained=snake_case )
timm_model.eval()
a : str = get_swin_config(snake_case )
a : Optional[int] = SwinForImageClassification(snake_case )
model.eval()
a : Union[str, Any] = convert_state_dict(timm_model.state_dict() , snake_case )
model.load_state_dict(snake_case )
a : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
a : Optional[Any] = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
a : str = Image.open(requests.get(snake_case , stream=snake_case ).raw )
a : Union[str, Any] = image_processor(images=snake_case , return_tensors='pt' )
a : int = timm_model(inputs['pixel_values'] )
a : Optional[int] = model(**snake_case ).logits
assert torch.allclose(snake_case , snake_case , atol=1E-3 )
print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case )
if __name__ == "__main__":
UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
UpperCamelCase : Optional[Any] = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
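    # Example invocation (editorial sketch; the script file name is hypothetical):
    #   python convert_swin_timm_to_pytorch.py \
    #       --swin_name swin_tiny_patch4_window7_224 \
    #       --pytorch_dump_folder_path ./swin-tiny-converted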
| 345 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
UpperCamelCase : List[Any] = {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/config.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/config.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/config.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/config.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/config.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/config.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json""",
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Optional[int] = "albert"
def __init__( self : int , UpperCAmelCase_ : Optional[int]=3_0_0_0_0 , UpperCAmelCase_ : int=1_2_8 , UpperCAmelCase_ : List[str]=4_0_9_6 , UpperCAmelCase_ : Dict=1_2 , UpperCAmelCase_ : Any=1 , UpperCAmelCase_ : Any=6_4 , UpperCAmelCase_ : int=1_6_3_8_4 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : List[Any]="gelu_new" , UpperCAmelCase_ : Optional[int]=0 , UpperCAmelCase_ : Optional[int]=0 , UpperCAmelCase_ : Union[str, Any]=5_1_2 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : str=1e-12 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Any="absolute" , UpperCAmelCase_ : Optional[int]=0 , UpperCAmelCase_ : Tuple=2 , UpperCAmelCase_ : Optional[Any]=3 , **UpperCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
a : str = vocab_size
a : Dict = embedding_size
a : List[str] = hidden_size
a : Tuple = num_hidden_layers
a : Tuple = num_hidden_groups
a : Optional[int] = num_attention_heads
a : Optional[int] = inner_group_num
a : Dict = hidden_act
a : Optional[Any] = intermediate_size
a : Any = hidden_dropout_prob
a : List[str] = attention_probs_dropout_prob
a : Tuple = max_position_embeddings
a : Optional[int] = type_vocab_size
a : Any = initializer_range
a : str = layer_norm_eps
a : List[Any] = classifier_dropout_prob
a : Tuple = position_embedding_type
class UpperCamelCase ( a_ ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
if self.task == "multiple-choice":
a : Optional[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
a : int = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
])
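    # Note (editorial, illustrative): the ONNX config above marks batch and sequence (plus the
    # choice dimension for multiple-choice tasks) as dynamic axes, so the exported graph accepts
    # variable batch sizes and sequence lengths for input_ids, attention_mask and token_type_ids.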
| 345 | '''simple docstring'''
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class UpperCamelCase :
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase_ : Tuple):
"""simple docstring"""
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
a : Dict = deepcopy(UpperCAmelCase_)
elif os.path.exists(UpperCAmelCase_):
with io.open(UpperCAmelCase_ , 'r' , encoding='utf-8') as f:
a : Union[str, Any] = json.load(UpperCAmelCase_)
else:
try:
a : Union[str, Any] = baseaa.urlsafe_baadecode(UpperCAmelCase_).decode('utf-8')
a : List[str] = json.loads(UpperCAmelCase_)
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""")
a : Optional[int] = config
self.set_stage_and_offload()
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : str = self.get_value('zero_optimization.stage' , -1)
# offload
a : Any = False
if self.is_zeroa() or self.is_zeroa():
a : Tuple = set(['cpu', 'nvme'])
a : int = set(
[
self.get_value('zero_optimization.offload_optimizer.device'),
self.get_value('zero_optimization.offload_param.device'),
])
if len(offload_devices & offload_devices_valid) > 0:
a : List[str] = True
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : List[str] = self.config
# find the config node of interest if it exists
a : int = ds_key_long.split('.')
a : Union[str, Any] = nodes.pop()
for node in nodes:
a : Union[str, Any] = config.get(UpperCAmelCase_)
if config is None:
return None, ds_key
return config, ds_key
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int=None):
"""simple docstring"""
a , a : int = self.find_config_node(UpperCAmelCase_)
if config is None:
return default
return config.get(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any=False):
"""simple docstring"""
a : Any = self.config
# find the config node of interest if it exists
a : Optional[Any] = ds_key_long.split('.')
for node in nodes:
a : List[str] = config
a : int = config.get(UpperCAmelCase_)
if config is None:
if must_exist:
raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""")
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : str):
"""simple docstring"""
a : List[str] = self.get_value(UpperCAmelCase_)
return False if value is None else bool(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : List[Any] = self.get_value(UpperCAmelCase_)
return False if value is None else not bool(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return self._stage == 2
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return self._stage == 3
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
return self._offload
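# Note (editorial, illustrative): the config wrapper above accepts a dict, a path to a JSON file,
# or a base64-encoded JSON string, and resolves dotted keys such as "zero_optimization.stage" by
# walking the nested config dict; offloading is detected when ZeRO stage 2/3 sends the optimizer
# or the parameters to "cpu" or "nvme".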
class UpperCamelCase :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : int):
"""simple docstring"""
a : Union[str, Any] = engine
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
self.engine.backward(UpperCAmelCase_ , **UpperCAmelCase_)
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any):
"""simple docstring"""
super().__init__(UpperCAmelCase_ , device_placement=UpperCAmelCase_ , scaler=UpperCAmelCase_)
a : List[str] = hasattr(self.optimizer , 'overflow')
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Dict=None):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
if self.__has_overflow__:
return self.optimizer.overflow
return False
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
super().__init__(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int]=0.0_01 , UpperCAmelCase_ : List[Any]=0 , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : int = params
a : str = lr
a : Tuple = weight_decay
a : Dict = kwargs
class UpperCamelCase :
"""simple docstring"""
def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Union[str, Any]=0 , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : str = optimizer
a : Tuple = total_num_steps
a : Optional[Any] = warmup_num_steps
a : List[str] = kwargs
| 345 | 1 |
'''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
UpperCamelCase : List[str] = logging.get_logger(__name__)
@dataclass
class UpperCamelCase :
"""simple docstring"""
A : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
A : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
A : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A : bool = field(
default=a_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Union[str, Any] = self.task_name.lower()
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : int = "train"
A : Tuple = "dev"
A : List[Any] = "test"
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : GlueDataTrainingArguments
A : str
A : List[InputFeatures]
def __init__( self : Tuple , UpperCAmelCase_ : GlueDataTrainingArguments , UpperCAmelCase_ : PreTrainedTokenizerBase , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Union[str, Split] = Split.train , UpperCAmelCase_ : Optional[str] = None , ):
"""simple docstring"""
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , UpperCAmelCase_ , )
a : Dict = args
a : int = glue_processors[args.task_name]()
a : int = glue_output_modes[args.task_name]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
try:
a : str = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name')
# Load data features from cache or dataset file
a : List[str] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
a : Tuple = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
a , a : str = label_list[2], label_list[1]
a : int = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
a : Union[str, Any] = cached_features_file + '.lock'
with FileLock(UpperCAmelCase_):
if os.path.exists(UpperCAmelCase_) and not args.overwrite_cache:
a : Optional[Any] = time.time()
a : Optional[Any] = torch.load(UpperCAmelCase_)
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start)
else:
logger.info(f"""Creating features from dataset file at {args.data_dir}""")
if mode == Split.dev:
a : List[Any] = self.processor.get_dev_examples(args.data_dir)
elif mode == Split.test:
a : Optional[Any] = self.processor.get_test_examples(args.data_dir)
else:
a : List[str] = self.processor.get_train_examples(args.data_dir)
if limit_length is not None:
a : Dict = examples[:limit_length]
a : List[Any] = glue_convert_examples_to_features(
UpperCAmelCase_ , UpperCAmelCase_ , max_length=args.max_seq_length , label_list=UpperCAmelCase_ , output_mode=self.output_mode , )
a : Dict = time.time()
torch.save(self.features , UpperCAmelCase_)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""")
def __len__( self : Tuple):
"""simple docstring"""
return len(self.features)
def __getitem__( self : Optional[int] , UpperCAmelCase_ : List[str]):
"""simple docstring"""
return self.features[i]
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return self.label_list
| 345 | '''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
UpperCamelCase : List[str] = logging.get_logger(__name__)
@dataclass
class UpperCamelCase :
"""simple docstring"""
A : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
A : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
A : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A : bool = field(
default=a_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Union[str, Any] = self.task_name.lower()
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : int = "train"
A : Tuple = "dev"
A : List[Any] = "test"
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : GlueDataTrainingArguments
A : str
A : List[InputFeatures]
def __init__( self : Tuple , UpperCAmelCase_ : GlueDataTrainingArguments , UpperCAmelCase_ : PreTrainedTokenizerBase , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Union[str, Split] = Split.train , UpperCAmelCase_ : Optional[str] = None , ):
"""simple docstring"""
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , UpperCAmelCase_ , )
a : Dict = args
a : int = glue_processors[args.task_name]()
a : int = glue_output_modes[args.task_name]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
try:
a : str = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name')
# Load data features from cache or dataset file
a : List[str] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
a : Tuple = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
a , a : str = label_list[2], label_list[1]
a : int = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
a : Union[str, Any] = cached_features_file + '.lock'
with FileLock(UpperCAmelCase_):
if os.path.exists(UpperCAmelCase_) and not args.overwrite_cache:
a : Optional[Any] = time.time()
a : Optional[Any] = torch.load(UpperCAmelCase_)
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start)
else:
logger.info(f"""Creating features from dataset file at {args.data_dir}""")
if mode == Split.dev:
a : List[Any] = self.processor.get_dev_examples(args.data_dir)
elif mode == Split.test:
a : Optional[Any] = self.processor.get_test_examples(args.data_dir)
else:
a : List[str] = self.processor.get_train_examples(args.data_dir)
if limit_length is not None:
a : Dict = examples[:limit_length]
a : List[Any] = glue_convert_examples_to_features(
UpperCAmelCase_ , UpperCAmelCase_ , max_length=args.max_seq_length , label_list=UpperCAmelCase_ , output_mode=self.output_mode , )
a : Dict = time.time()
torch.save(self.features , UpperCAmelCase_)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""")
def __len__( self : Tuple):
"""simple docstring"""
return len(self.features)
def __getitem__( self : Optional[int] , UpperCAmelCase_ : List[str]):
"""simple docstring"""
return self.features[i]
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return self.label_list
| 345 | 1 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( snake_case : list , snake_case : int = 0 ) -> list:
"""simple docstring"""
a : int = length or len(snake_case )
a : Optional[Any] = False
for i in range(length - 1 ):
if list_data[i] > list_data[i + 1]:
a , a : List[Any] = list_data[i + 1], list_data[i]
a : Union[str, Any] = True
return list_data if not swapped else bubble_sort(snake_case , length - 1 )
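# Example (editorial, not part of the original docstring):
#   bubble_sort([5, 1, 4, 2]) -> [1, 2, 4, 5]
# Each recursive call shrinks `length` by one; recursion stops early once a full pass makes no swap.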
if __name__ == "__main__":
import doctest
doctest.testmod()
| 345 | '''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase : Dict = logging.get_logger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Any = ["pixel_values"]
def __init__( self : str , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : str = size if size is not None else {'shortest_edge': 2_5_6}
a : Dict = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
a : int = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Any = do_resize
a : List[str] = size
a : Union[str, Any] = resample
a : int = do_center_crop
a : Optional[int] = crop_size
a : Tuple = do_rescale
a : int = rescale_factor
a : Optional[Any] = do_normalize
a : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
a : Optional[int] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
a : Union[str, Any] = get_resize_output_image_size(UpperCAmelCase_ , size=size['shortest_edge'] , default_to_square=UpperCAmelCase_)
return resize(UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
a : List[str] = get_size_dict(UpperCAmelCase_)
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
return center_crop(UpperCAmelCase_ , size=(size['height'], size['width']) , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : float , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
a : int = do_resize if do_resize is not None else self.do_resize
a : int = size if size is not None else self.size
a : Union[str, Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = resample if resample is not None else self.resample
a : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
a : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
a : Dict = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : str = do_rescale if do_rescale is not None else self.do_rescale
a : int = rescale_factor if rescale_factor is not None else self.rescale_factor
a : str = do_normalize if do_normalize is not None else self.do_normalize
a : List[str] = image_mean if image_mean is not None else self.image_mean
a : Optional[int] = image_std if image_std is not None else self.image_std
a : Dict = make_list_of_images(UpperCAmelCase_)
if not valid_images(UpperCAmelCase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a : List[Any] = [to_numpy_array(UpperCAmelCase_) for image in images]
if do_resize:
a : Dict = [self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_) for image in images]
if do_center_crop:
a : Any = [self.center_crop(image=UpperCAmelCase_ , size=UpperCAmelCase_) for image in images]
if do_rescale:
a : Optional[int] = [self.rescale(image=UpperCAmelCase_ , scale=UpperCAmelCase_) for image in images]
if do_normalize:
a : Dict = [self.normalize(image=UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_) for image in images]
a : List[Any] = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : List[str] = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Tuple] = None):
"""simple docstring"""
a : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase_) != len(UpperCAmelCase_):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(UpperCAmelCase_):
a : Optional[Any] = target_sizes.numpy()
a : List[str] = []
for idx in range(len(UpperCAmelCase_)):
a : Optional[Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=UpperCAmelCase_)
a : Union[str, Any] = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(UpperCAmelCase_)
else:
a : Optional[int] = logits.argmax(dim=1)
a : List[str] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
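    # Note (editorial, illustrative): preprocessing applies, in order, resize (shortest edge),
    # center crop, rescale and normalize; post-processing upsamples the logits to each target
    # size with bilinear interpolation and takes an argmax over the class dimension to build
    # the semantic segmentation maps.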
| 345 | 1 |
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( snake_case : list[int | float] , snake_case : int , snake_case : int ) -> int | float:
"""simple docstring"""
if len(snake_case ) == 0:
raise ValueError('find_max() arg is an empty sequence' )
if (
left >= len(snake_case )
or left < -len(snake_case )
or right >= len(snake_case )
or right < -len(snake_case )
):
raise IndexError('list index out of range' )
if left == right:
return nums[left]
a : Union[str, Any] = (left + right) >> 1 # the middle
a : List[str] = find_max(snake_case , snake_case , snake_case ) # find max in range[left, mid]
a : Dict = find_max(snake_case , mid + 1 , snake_case ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
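# Example (editorial, not part of the original docstring):
#   find_max([1, 9, 4, 7], 0, 3) -> 9   (divide-and-conquer maximum over [left, right])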
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 345 | '''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( snake_case : int | float | str , snake_case : int | float | str ) -> list[str]:
"""simple docstring"""
if nth_term == "":
return [""]
a : Dict = int(snake_case )
a : Optional[int] = int(snake_case )
a : list[str] = []
for temp in range(int(snake_case ) ):
series.append(F"""1 / {pow(temp + 1 , int(snake_case ) )}""" if series else '1' )
return series
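# Example (editorial, not part of the original docstring):
#   p_series(3, 2) -> ['1', '1 / 4', '1 / 9']   i.e. 1 + 1/2^2 + 1/3^2 truncated to three terms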
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase : Optional[int] = int(input("""Enter the last number (nth term) of the P-Series"""))
UpperCamelCase : List[Any] = int(input("""Enter the power for P-Series"""))
print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
print(p_series(nth_term, power))
| 345 | 1 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class UpperCamelCase ( TensorFormatter[Mapping, "torch.Tensor", Mapping] ):
"""simple docstring"""
def __init__( self : int , UpperCAmelCase_ : Optional[int]=None , **UpperCAmelCase_ : Dict):
"""simple docstring"""
super().__init__(features=UpperCAmelCase_)
a : Optional[Any] = torch_tensor_kwargs
import torch # noqa import torch at initialization
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
import torch
if isinstance(UpperCAmelCase_ , UpperCAmelCase_) and column:
if all(
isinstance(UpperCAmelCase_ , torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
for x in column):
return torch.stack(UpperCAmelCase_)
return column
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Tuple):
"""simple docstring"""
import torch
if isinstance(UpperCAmelCase_ , (str, bytes, type(UpperCAmelCase_))):
return value
elif isinstance(UpperCAmelCase_ , (np.character, np.ndarray)) and np.issubdtype(value.dtype , np.character):
return value.tolist()
a : Optional[Any] = {}
if isinstance(UpperCAmelCase_ , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.integer):
a : List[Any] = {'dtype': torch.intaa}
elif isinstance(UpperCAmelCase_ , (np.number, np.ndarray)) and np.issubdtype(value.dtype , np.floating):
a : List[str] = {'dtype': torch.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(UpperCAmelCase_ , PIL.Image.Image):
a : Dict = np.asarray(UpperCAmelCase_)
return torch.tensor(UpperCAmelCase_ , **{**default_dtype, **self.torch_tensor_kwargs})
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Dict):
"""simple docstring"""
import torch
# support for torch, tf, jax etc.
if hasattr(UpperCAmelCase_ , '__array__') and not isinstance(UpperCAmelCase_ , torch.Tensor):
a : Optional[Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(UpperCAmelCase_ , np.ndarray):
            if data_struct.dtype == object: # torch tensors cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(UpperCAmelCase_) for substruct in data_struct])
elif isinstance(UpperCAmelCase_ , (list, tuple)):
return self._consolidate([self.recursive_tensorize(UpperCAmelCase_) for substruct in data_struct])
return self._tensorize(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : dict):
"""simple docstring"""
return map_nested(self._recursive_tensorize , UpperCAmelCase_ , map_list=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : pa.Table):
"""simple docstring"""
a : List[Any] = self.numpy_arrow_extractor().extract_row(UpperCAmelCase_)
a : Tuple = self.python_features_decoder.decode_row(UpperCAmelCase_)
return self.recursive_tensorize(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : pa.Table):
"""simple docstring"""
a : int = self.numpy_arrow_extractor().extract_column(UpperCAmelCase_)
a : Union[str, Any] = self.python_features_decoder.decode_column(UpperCAmelCase_ , pa_table.column_names[0])
a : str = self.recursive_tensorize(UpperCAmelCase_)
a : str = self._consolidate(UpperCAmelCase_)
return column
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : pa.Table):
"""simple docstring"""
a : int = self.numpy_arrow_extractor().extract_batch(UpperCAmelCase_)
a : List[str] = self.python_features_decoder.decode_batch(UpperCAmelCase_)
a : Tuple = self.recursive_tensorize(UpperCAmelCase_)
for column_name in batch:
a : Any = self._consolidate(batch[column_name])
return batch
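    # Note (editorial, illustrative): the formatter extracts a row, column or batch from the Arrow
    # table, decodes it with the dataset's features, converts values to torch tensors recursively,
    # and stacks tensors that share the same shape and dtype into a single tensor.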
| 345 | '''simple docstring'''
import torch
def SCREAMING_SNAKE_CASE__ ( ) -> str:
"""simple docstring"""
if torch.cuda.is_available():
a : int = torch.cuda.device_count()
else:
a : Any = 0
print(F"""Successfully ran on {num_gpus} GPUs""" )
if __name__ == "__main__":
main()
| 345 | 1 |
'''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCamelCase : int = """true"""
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : int=82 , snake_case : Tuple=16 ) -> Union[str, Any]:
"""simple docstring"""
set_seed(42 )
a : List[str] = RegressionModel()
a : Union[str, Any] = deepcopy(snake_case )
a : Dict = RegressionDataset(length=snake_case )
a : Dict = DataLoader(snake_case , batch_size=snake_case )
model.to(accelerator.device )
a , a : Optional[int] = accelerator.prepare(snake_case , snake_case )
return model, ddp_model, dataloader
def SCREAMING_SNAKE_CASE__ ( snake_case : Accelerator , snake_case : Union[str, Any]=False ) -> Optional[int]:
"""simple docstring"""
a : List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
a : Any = load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(snake_case : int ):
a : Any = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case , max_length=snake_case )
return outputs
with accelerator.main_process_first():
a : Dict = dataset.map(
snake_case , batched=snake_case , remove_columns=['idx', 'sentence1', 'sentence2'] , )
a : List[str] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(snake_case : Optional[Any] ):
if use_longest:
return tokenizer.pad(snake_case , padding='longest' , return_tensors='pt' )
return tokenizer.pad(snake_case , padding='max_length' , max_length=128 , return_tensors='pt' )
return DataLoader(snake_case , shuffle=snake_case , collate_fn=snake_case , batch_size=16 )
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
a : int = Accelerator(dispatch_batches=snake_case , split_batches=snake_case )
a : List[str] = get_dataloader(snake_case , not dispatch_batches )
a : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=snake_case )
a , a : Optional[Any] = accelerator.prepare(snake_case , snake_case )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
a : Dict = []
for batch in dataloader:
a , a : Any = batch.values()
with torch.no_grad():
a : Tuple = model(snake_case )
a , a : Dict = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
a , a : List[str] = [], []
for logit, targ in logits_and_targets:
logits.append(snake_case )
targs.append(snake_case )
a , a : Any = torch.cat(snake_case ), torch.cat(snake_case )
return logits, targs
def SCREAMING_SNAKE_CASE__ ( snake_case : Accelerator , snake_case : Dict=82 , snake_case : str=False , snake_case : List[str]=False , snake_case : List[Any]=16 ) -> Optional[int]:
"""simple docstring"""
a , a , a : int = get_basic_setup(snake_case , snake_case , snake_case )
a , a : int = generate_predictions(snake_case , snake_case , snake_case )
assert (
len(snake_case ) == num_samples
), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(snake_case )}"""
def SCREAMING_SNAKE_CASE__ ( snake_case : bool = False , snake_case : bool = False ) -> List[str]:
"""simple docstring"""
a : int = evaluate.load('glue' , 'mrpc' )
a , a : Tuple = get_mrpc_setup(snake_case , snake_case )
# First do baseline
a , a , a : Tuple = setup['no']
model.to(snake_case )
model.eval()
for batch in dataloader:
batch.to(snake_case )
with torch.inference_mode():
a : List[Any] = model(**snake_case )
a : Optional[Any] = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=snake_case , references=batch['labels'] )
a : Tuple = metric.compute()
# Then do distributed
a , a , a : Tuple = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
a : List[str] = model(**snake_case )
a : Optional[Any] = outputs.logits.argmax(dim=-1 )
a : Optional[int] = batch['labels']
a , a : Optional[int] = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=snake_case , references=snake_case )
a : str = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def SCREAMING_SNAKE_CASE__ ( ) -> str:
"""simple docstring"""
a : Dict = Accelerator(split_batches=snake_case , dispatch_batches=snake_case )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(snake_case , snake_case )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
a : List[Any] = Accelerator(split_batches=snake_case , dispatch_batches=snake_case )
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(snake_case , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
a : Optional[Any] = Accelerator()
test_torch_metrics(snake_case , 512 )
accelerator.state._reset_state()
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] ) -> int:
"""simple docstring"""
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 345 | '''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
UpperCamelCase : Optional[int] = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
UpperCamelCase : Optional[Any] = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
UpperCamelCase : str = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
    predictions: list of generated text to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of references, one for each prediction. Each
        reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large'. Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Tuple="auto" , UpperCAmelCase_ : Any=-1 , UpperCAmelCase_ : Optional[int]=0.9 , UpperCAmelCase_ : Union[str, Any]=5 , UpperCAmelCase_ : int=5_0_0 , UpperCAmelCase_ : int="gpt2-large" , UpperCAmelCase_ : Tuple=-1 , UpperCAmelCase_ : Dict=1_0_2_4 , UpperCAmelCase_ : List[str]=2_5 , UpperCAmelCase_ : int=5 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : str=2_5 , ):
"""simple docstring"""
a : List[str] = compute_mauve(
p_text=UpperCAmelCase_ , q_text=UpperCAmelCase_ , p_features=UpperCAmelCase_ , q_features=UpperCAmelCase_ , p_tokens=UpperCAmelCase_ , q_tokens=UpperCAmelCase_ , num_buckets=UpperCAmelCase_ , pca_max_data=UpperCAmelCase_ , kmeans_explained_var=UpperCAmelCase_ , kmeans_num_redo=UpperCAmelCase_ , kmeans_max_iter=UpperCAmelCase_ , featurize_model_name=UpperCAmelCase_ , device_id=UpperCAmelCase_ , max_text_length=UpperCAmelCase_ , divergence_curve_discretization_size=UpperCAmelCase_ , mauve_scaling_factor=UpperCAmelCase_ , verbose=UpperCAmelCase_ , seed=UpperCAmelCase_ , )
return out
| 345 | 1 |
'''simple docstring'''
from __future__ import annotations
from bisect import bisect_left
from functools import total_ordering
from heapq import merge
@total_ordering
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __lt__( self : str , UpperCAmelCase_ : Dict):
"""simple docstring"""
return self[-1] < other[-1]
def __eq__( self : Optional[Any] , UpperCAmelCase_ : List[str]):
"""simple docstring"""
return self[-1] == other[-1]
def SCREAMING_SNAKE_CASE__ ( snake_case : list ) -> list:
"""simple docstring"""
a : list[Stack] = []
# sort into stacks
for element in collection:
a : Optional[int] = Stack([element] )
a : Union[str, Any] = bisect_left(snake_case , snake_case )
if i != len(snake_case ):
stacks[i].append(snake_case )
else:
stacks.append(snake_case )
# use a heap-based merge to merge the stacks efficiently
a : List[Any] = merge(*(reversed(snake_case ) for stack in stacks) )
return collection
if __name__ == "__main__":
UpperCamelCase : str = input("""Enter numbers separated by a comma:\n""").strip()
UpperCamelCase : Dict = [int(item) for item in user_input.split(""",""")]
print(patience_sort(unsorted))
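# Editor's addition: a hedged, self-contained sketch (not part of the original snippet) that
# traces only the pile-building phase of patience sort with plain lists, so the bisect step
# above is easier to follow. All names below are illustrative.
def _patience_piles_demo(collection: list[int]) -> list[list[int]]:
    from bisect import bisect_left

    piles: list[list[int]] = []
    tops: list[int] = []  # tops[i] mirrors piles[i][-1] so bisect_left can compare plain ints
    for element in collection:
        i = bisect_left(tops, element)
        if i != len(piles):
            piles[i].append(element)  # drop onto the leftmost pile whose top is >= element
            tops[i] = element
        else:
            piles.append([element])  # no such pile exists: start a new one
            tops.append(element)
    return piles


# each pile stays non-increasing, so reversing every pile and heap-merging them yields a sorted list
assert _patience_piles_demo([5, 1, 4, 2]) == [[5, 1], [4, 2]]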
| 345 | '''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( snake_case : list[int | float] , snake_case : int , snake_case : int ) -> int | float:
"""simple docstring"""
if len(snake_case ) == 0:
raise ValueError('find_max() arg is an empty sequence' )
if (
left >= len(snake_case )
or left < -len(snake_case )
or right >= len(snake_case )
or right < -len(snake_case )
):
raise IndexError('list index out of range' )
if left == right:
return nums[left]
a : Union[str, Any] = (left + right) >> 1 # the middle
a : List[str] = find_max(snake_case , snake_case , snake_case ) # find max in range[left, mid]
a : Dict = find_max(snake_case , mid + 1 , snake_case ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
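# Editor's addition: an illustrative, self-contained restatement (not part of the original
# file) of the divide-and-conquer idea above — split [left, right] at the midpoint and take
# the larger of the two halves' maxima. Total work is O(n) with O(log n) recursion depth.
def _find_max_demo(nums: list[float]) -> float:
    def rec(left: int, right: int) -> float:
        if left == right:
            return nums[left]
        mid = (left + right) >> 1  # midpoint of the current index range
        return max(rec(left, mid), rec(mid + 1, right))

    return rec(0, len(nums) - 1)


assert _find_max_demo([3.0, -1.0, 7.0, 2.0]) == 7.0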
| 345 | 1 |
'''simple docstring'''
import numpy as np
def SCREAMING_SNAKE_CASE__ ( snake_case : np.array ) -> np.array:
"""simple docstring"""
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
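# Editor's addition: a small, self-contained usage sketch (illustrative only, names are
# hypothetical) of the elementwise logistic sigmoid defined above, 1 / (1 + exp(-x)).
def _sigmoid_demo() -> None:
    import numpy as np

    x = np.array([-2.0, 0.0, 2.0])
    y = 1 / (1 + np.exp(-x))  # values lie strictly in (0, 1), with sigmoid(0) == 0.5
    assert np.isclose(y[1], 0.5)
    assert np.allclose(y + y[::-1], 1.0)  # sigmoid(-x) == 1 - sigmoid(x)


_sigmoid_demo()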
| 345 | '''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCamelCase : int = """true"""
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : int=82 , snake_case : Tuple=16 ) -> Union[str, Any]:
"""simple docstring"""
set_seed(42 )
a : List[str] = RegressionModel()
a : Union[str, Any] = deepcopy(snake_case )
a : Dict = RegressionDataset(length=snake_case )
a : Dict = DataLoader(snake_case , batch_size=snake_case )
model.to(accelerator.device )
a , a : Optional[int] = accelerator.prepare(snake_case , snake_case )
return model, ddp_model, dataloader
def SCREAMING_SNAKE_CASE__ ( snake_case : Accelerator , snake_case : Union[str, Any]=False ) -> Optional[int]:
"""simple docstring"""
a : List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
a : Any = load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(snake_case : int ):
a : Any = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case , max_length=snake_case )
return outputs
with accelerator.main_process_first():
a : Dict = dataset.map(
snake_case , batched=snake_case , remove_columns=['idx', 'sentence1', 'sentence2'] , )
a : List[str] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(snake_case : Optional[Any] ):
if use_longest:
return tokenizer.pad(snake_case , padding='longest' , return_tensors='pt' )
return tokenizer.pad(snake_case , padding='max_length' , max_length=128 , return_tensors='pt' )
return DataLoader(snake_case , shuffle=snake_case , collate_fn=snake_case , batch_size=16 )
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
a : int = Accelerator(dispatch_batches=snake_case , split_batches=snake_case )
a : List[str] = get_dataloader(snake_case , not dispatch_batches )
a : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=snake_case )
a , a : Optional[Any] = accelerator.prepare(snake_case , snake_case )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
a : Dict = []
for batch in dataloader:
a , a : Any = batch.values()
with torch.no_grad():
a : Tuple = model(snake_case )
a , a : Dict = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
a , a : List[str] = [], []
for logit, targ in logits_and_targets:
logits.append(snake_case )
targs.append(snake_case )
a , a : Any = torch.cat(snake_case ), torch.cat(snake_case )
return logits, targs
def SCREAMING_SNAKE_CASE__ ( snake_case : Accelerator , snake_case : Dict=82 , snake_case : str=False , snake_case : List[str]=False , snake_case : List[Any]=16 ) -> Optional[int]:
"""simple docstring"""
a , a , a : int = get_basic_setup(snake_case , snake_case , snake_case )
a , a : int = generate_predictions(snake_case , snake_case , snake_case )
assert (
len(snake_case ) == num_samples
), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(snake_case )}"""
def SCREAMING_SNAKE_CASE__ ( snake_case : bool = False , snake_case : bool = False ) -> List[str]:
"""simple docstring"""
a : int = evaluate.load('glue' , 'mrpc' )
a , a : Tuple = get_mrpc_setup(snake_case , snake_case )
# First do baseline
a , a , a : Tuple = setup['no']
model.to(snake_case )
model.eval()
for batch in dataloader:
batch.to(snake_case )
with torch.inference_mode():
a : List[Any] = model(**snake_case )
a : Optional[Any] = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=snake_case , references=batch['labels'] )
a : Tuple = metric.compute()
# Then do distributed
a , a , a : Tuple = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
a : List[str] = model(**snake_case )
a : Optional[Any] = outputs.logits.argmax(dim=-1 )
a : Optional[int] = batch['labels']
a , a : Optional[int] = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=snake_case , references=snake_case )
a : str = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def SCREAMING_SNAKE_CASE__ ( ) -> str:
"""simple docstring"""
a : Dict = Accelerator(split_batches=snake_case , dispatch_batches=snake_case )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(snake_case , snake_case )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
a : List[Any] = Accelerator(split_batches=snake_case , dispatch_batches=snake_case )
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(snake_case , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
a : Optional[Any] = Accelerator()
test_torch_metrics(snake_case , 512 )
accelerator.state._reset_state()
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] ) -> int:
"""simple docstring"""
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
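# Editor's addition: a toy, single-process illustration (not part of the original test and
# using hypothetical names) of why `gather_for_metrics` is exercised above — with uneven
# batches, a naive gather can return padded or duplicated samples, so the gathered result is
# truncated back to the true dataset length before being fed to the metric.
def _gather_for_metrics_demo(per_process_samples: list[list[int]], dataset_length: int) -> list[int]:
    gathered = [sample for proc in per_process_samples for sample in proc]
    return gathered[:dataset_length]


# two fake processes received 3 samples each, but the dataset only holds 5 real samples
assert len(_gather_for_metrics_demo([[0, 1, 2], [3, 4, 4]], 5)) == 5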
| 345 | 1 |
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
UpperCamelCase : Optional[int] = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
UpperCamelCase : Optional[Any] = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
UpperCamelCase : str = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each prediction
should be a string with tokens separated by spaces.
references: list of references, one for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large'. Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Tuple="auto" , UpperCAmelCase_ : Any=-1 , UpperCAmelCase_ : Optional[int]=0.9 , UpperCAmelCase_ : Union[str, Any]=5 , UpperCAmelCase_ : int=5_0_0 , UpperCAmelCase_ : int="gpt2-large" , UpperCAmelCase_ : Tuple=-1 , UpperCAmelCase_ : Dict=1_0_2_4 , UpperCAmelCase_ : List[str]=2_5 , UpperCAmelCase_ : int=5 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : str=2_5 , ):
"""simple docstring"""
a : List[str] = compute_mauve(
p_text=UpperCAmelCase_ , q_text=UpperCAmelCase_ , p_features=UpperCAmelCase_ , q_features=UpperCAmelCase_ , p_tokens=UpperCAmelCase_ , q_tokens=UpperCAmelCase_ , num_buckets=UpperCAmelCase_ , pca_max_data=UpperCAmelCase_ , kmeans_explained_var=UpperCAmelCase_ , kmeans_num_redo=UpperCAmelCase_ , kmeans_max_iter=UpperCAmelCase_ , featurize_model_name=UpperCAmelCase_ , device_id=UpperCAmelCase_ , max_text_length=UpperCAmelCase_ , divergence_curve_discretization_size=UpperCAmelCase_ , mauve_scaling_factor=UpperCAmelCase_ , verbose=UpperCAmelCase_ , seed=UpperCAmelCase_ , )
return out
| 345 | '''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Optional[int] = ["vqvae"]
def __init__( self : List[str] , UpperCAmelCase_ : AutoencoderKL , UpperCAmelCase_ : UNetaDConditionModel , UpperCAmelCase_ : Mel , UpperCAmelCase_ : Union[DDIMScheduler, DDPMScheduler] , ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , mel=UpperCAmelCase_ , vqvae=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
return 5_0 if isinstance(self.scheduler , UpperCAmelCase_) else 1_0_0_0
@torch.no_grad()
def __call__( self : Dict , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : str = None , UpperCAmelCase_ : np.ndarray = None , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : int = None , UpperCAmelCase_ : torch.Generator = None , UpperCAmelCase_ : float = 0 , UpperCAmelCase_ : float = 0 , UpperCAmelCase_ : torch.Generator = None , UpperCAmelCase_ : float = 0 , UpperCAmelCase_ : torch.Tensor = None , UpperCAmelCase_ : torch.Tensor = None , UpperCAmelCase_ : Optional[Any]=True , ):
"""simple docstring"""
a : Optional[Any] = steps or self.get_default_steps()
self.scheduler.set_timesteps(UpperCAmelCase_)
a : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size) == int:
a : Dict = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
a : Dict = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=UpperCAmelCase_ , device=self.device , )
a : Tuple = noise
a : Optional[int] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(UpperCAmelCase_ , UpperCAmelCase_)
a : List[Any] = self.mel.audio_slice_to_image(UpperCAmelCase_)
a : str = np.frombuffer(input_image.tobytes() , dtype='uint8').reshape(
(input_image.height, input_image.width))
a : List[str] = (input_image / 2_5_5) * 2 - 1
a : Any = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float).to(self.device)
if self.vqvae is not None:
a : List[Any] = self.vqvae.encode(torch.unsqueeze(UpperCAmelCase_ , 0)).latent_dist.sample(
generator=UpperCAmelCase_)[0]
a : str = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
a : Union[str, Any] = self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , self.scheduler.timesteps[start_step - 1])
a : Dict = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
a : List[Any] = int(mask_start_secs * pixels_per_second)
a : Optional[Any] = int(mask_end_secs * pixels_per_second)
a : Optional[int] = self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , torch.tensor(self.scheduler.timesteps[start_step:]))
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
if isinstance(self.unet , UpperCAmelCase_):
a : Dict = self.unet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)['sample']
else:
a : str = self.unet(UpperCAmelCase_ , UpperCAmelCase_)['sample']
if isinstance(self.scheduler , UpperCAmelCase_):
a : List[Any] = self.scheduler.step(
model_output=UpperCAmelCase_ , timestep=UpperCAmelCase_ , sample=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , )['prev_sample']
else:
a : Any = self.scheduler.step(
model_output=UpperCAmelCase_ , timestep=UpperCAmelCase_ , sample=UpperCAmelCase_ , generator=UpperCAmelCase_ , )['prev_sample']
if mask is not None:
if mask_start > 0:
a : str = mask[:, step, :, :mask_start]
if mask_end > 0:
a : Dict = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was the scaling factor used in training to ensure unit variance
a : List[str] = 1 / self.vqvae.config.scaling_factor * images
a : str = self.vqvae.decode(UpperCAmelCase_)['sample']
a : Tuple = (images / 2 + 0.5).clamp(0 , 1)
a : Any = images.cpu().permute(0 , 2 , 3 , 1).numpy()
a : List[str] = (images * 2_5_5).round().astype('uint8')
a : Tuple = list(
(Image.fromarray(_[:, :, 0]) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(UpperCAmelCase_ , mode='RGB').convert('L') for _ in images))
a : List[str] = [self.mel.image_to_audio(UpperCAmelCase_) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(UpperCAmelCase_)[:, np.newaxis, :]) , **ImagePipelineOutput(UpperCAmelCase_))
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : List[Image.Image] , UpperCAmelCase_ : int = 5_0):
"""simple docstring"""
assert isinstance(self.scheduler , UpperCAmelCase_)
self.scheduler.set_timesteps(UpperCAmelCase_)
a : Dict = np.array(
[np.frombuffer(image.tobytes() , dtype='uint8').reshape((1, image.height, image.width)) for image in images])
a : Tuple = (sample / 2_5_5) * 2 - 1
a : int = torch.Tensor(UpperCAmelCase_).to(self.device)
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,))):
a : Optional[Any] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
a : Optional[Any] = self.scheduler.alphas_cumprod[t]
a : List[Any] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
a : List[str] = 1 - alpha_prod_t
a : Optional[Any] = self.unet(UpperCAmelCase_ , UpperCAmelCase_)['sample']
a : Union[str, Any] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
a : Dict = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
a : Union[str, Any] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : torch.Tensor , UpperCAmelCase_ : torch.Tensor , UpperCAmelCase_ : float):
"""simple docstring"""
a : List[Any] = acos(torch.dot(torch.flatten(UpperCAmelCase_) , torch.flatten(UpperCAmelCase_)) / torch.norm(UpperCAmelCase_) / torch.norm(UpperCAmelCase_))
return sin((1 - alpha) * theta) * xa / sin(UpperCAmelCase_) + sin(alpha * theta) * xa / sin(UpperCAmelCase_)
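# Editor's addition: a hedged, standalone restatement (illustrative; not part of the original
# pipeline) of the spherical linear interpolation implemented by the static method above:
#   theta = arccos(<x0, x1> / (|x0| * |x1|))
#   slerp(alpha) = sin((1 - alpha) * theta) / sin(theta) * x0 + sin(alpha * theta) / sin(theta) * x1
# so alpha = 0 returns x0, alpha = 1 returns x1, and intermediate values move along the arc
# between them (undefined when the tensors are parallel, since sin(theta) == 0).
def _slerp_demo(xa, xb, alpha: float):
    import torch
    from math import acos, sin

    cos_theta = (torch.dot(torch.flatten(xa), torch.flatten(xb)) / (torch.norm(xa) * torch.norm(xb))).item()
    theta = acos(max(-1.0, min(1.0, cos_theta)))  # clamp for numerical safety
    return sin((1 - alpha) * theta) / sin(theta) * xa + sin(alpha * theta) / sin(theta) * xb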
| 345 | 1 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCamelCase : List[str] = logging.get_logger(__name__)
UpperCamelCase : Tuple = {
"""google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : str = "umt5"
A : Dict = ["past_key_values"]
def __init__( self : List[str] , UpperCAmelCase_ : Optional[int]=2_5_0_1_1_2 , UpperCAmelCase_ : str=5_1_2 , UpperCAmelCase_ : Tuple=6_4 , UpperCAmelCase_ : Optional[Any]=1_0_2_4 , UpperCAmelCase_ : Tuple=8 , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : List[Any]=6 , UpperCAmelCase_ : str=3_2 , UpperCAmelCase_ : Union[str, Any]=1_2_8 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Dict=1e-6 , UpperCAmelCase_ : str=1.0 , UpperCAmelCase_ : List[Any]="gated-gelu" , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : List[str]="T5Tokenizer" , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Optional[Any]=0 , UpperCAmelCase_ : Any=1 , UpperCAmelCase_ : str=0 , **UpperCAmelCase_ : str , ):
"""simple docstring"""
super().__init__(
is_encoder_decoder=UpperCAmelCase_ , tokenizer_class=UpperCAmelCase_ , tie_word_embeddings=UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , decoder_start_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , )
a : Union[str, Any] = vocab_size
a : Optional[int] = d_model
a : Any = d_kv
a : Optional[Any] = d_ff
a : str = num_layers
a : Union[str, Any] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
a : Optional[int] = num_heads
a : int = relative_attention_num_buckets
a : Optional[int] = relative_attention_max_distance
a : List[str] = dropout_rate
a : List[Any] = layer_norm_epsilon
a : Optional[int] = initializer_factor
a : Union[str, Any] = feed_forward_proj
a : Optional[Any] = use_cache
a : Dict = self.feed_forward_proj.split('-')
a : Tuple = act_info[-1]
a : str = act_info[0] == 'gated'
if len(UpperCAmelCase_) > 1 and act_info[0] != "gated" or len(UpperCAmelCase_) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'')
if feed_forward_proj == "gated-gelu":
a : List[str] = 'gelu_new'
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
return self.d_model
@property
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
return self.num_heads
@property
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return self.num_layers
class UpperCamelCase ( a_ ):
"""simple docstring"""
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : List[str] = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
a : List[str] = 'past_encoder_sequence + sequence'
a : Tuple = {0: 'batch'}
a : str = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
a : Union[str, Any] = {0: 'batch', 1: 'decoder_sequence'}
a : List[Any] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(UpperCAmelCase_ , direction='inputs')
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
return 1_3
@property
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
return 5e-4
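# Editor's addition: an illustrative, self-contained restatement (hypothetical helper name,
# not part of the original file) of how the constructor above parses `feed_forward_proj`
# into an activation name plus a gated/non-gated flag.
def _parse_feed_forward_proj_demo(feed_forward_proj: str) -> tuple[str, bool]:
    act_info = feed_forward_proj.split("-")
    dense_act_fn = act_info[-1]
    is_gated_act = act_info[0] == "gated"
    if feed_forward_proj == "gated-gelu":
        dense_act_fn = "gelu_new"  # same special case as in the config above
    return dense_act_fn, is_gated_act


assert _parse_feed_forward_proj_demo("gated-gelu") == ("gelu_new", True)
assert _parse_feed_forward_proj_demo("relu") == ("relu", False)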
| 345 | '''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]=1_3 , UpperCAmelCase_ : List[str]=3_0 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Union[str, Any]=3_2 , UpperCAmelCase_ : Union[str, Any]=5 , UpperCAmelCase_ : Tuple=4 , UpperCAmelCase_ : Dict=3_7 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Dict=1_0 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Tuple=2 , ):
"""simple docstring"""
a : Any = parent
a : Optional[int] = batch_size
a : str = image_size
a : str = patch_size
a : List[Any] = num_channels
a : Optional[int] = is_training
a : Dict = use_labels
a : Any = hidden_size
a : Optional[int] = num_hidden_layers
a : int = num_attention_heads
a : int = intermediate_size
a : Any = hidden_act
a : Optional[int] = hidden_dropout_prob
a : Optional[int] = attention_probs_dropout_prob
a : Dict = type_sequence_label_size
a : Tuple = initializer_range
a : List[str] = scope
a : str = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
a : Optional[Any] = (image_size // patch_size) ** 2
a : str = num_patches + 1
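# editor's note (illustrative worked example): with the tester defaults above, (30 // 2) ** 2 + 1 = 226 tokens per image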
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a : List[Any] = None
if self.use_labels:
a : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : List[str] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : int = ViTModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : str = ViTForMaskedImageModeling(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
a : int = 1
a : Union[str, Any] = ViTForMaskedImageModeling(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : Optional[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any):
"""simple docstring"""
a : str = self.type_sequence_label_size
a : Tuple = ViTForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
a : List[Any] = 1
a : Union[str, Any] = ViTForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : Optional[int] = model(UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.prepare_config_and_inputs()
(
(
a
) , (
a
) , (
a
) ,
) : Tuple = config_and_inputs
a : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : str = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
A : Optional[Any] = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
A : List[str] = True
A : Optional[int] = False
A : Dict = False
A : Optional[int] = False
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : str = ViTModelTester(self)
a : Optional[Any] = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=3_7)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds')
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a , a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Union[str, Any] = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
a : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a , a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Union[str, Any] = model_class(UpperCAmelCase_)
a : Tuple = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : Dict = [*signature.parameters.keys()]
a : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Dict = ViTModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
"""simple docstring"""
a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Optional[Any] = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(UpperCAmelCase_)
a : List[Any] = self.default_image_processor
a : List[str] = prepare_img()
a : Tuple = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
a : List[Any] = model(**UpperCAmelCase_)
# verify the logits
a : List[str] = torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
a : Union[str, Any] = torch.tensor([-0.27_44, 0.82_15, -0.08_36]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : List[str] = ViTModel.from_pretrained('facebook/dino-vits8').to(UpperCAmelCase_)
a : Any = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=4_8_0)
a : int = prepare_img()
a : List[str] = image_processor(images=UpperCAmelCase_ , return_tensors='pt')
a : List[str] = inputs.pixel_values.to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
a : List[Any] = model(UpperCAmelCase_ , interpolate_pos_encoding=UpperCAmelCase_)
# verify the logits
a : Dict = torch.Size((1, 3_6_0_1, 3_8_4))
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase_)
a : str = torch.tensor(
[[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : str = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto')
a : List[Any] = self.default_image_processor
a : List[str] = prepare_img()
a : Tuple = image_processor(images=UpperCAmelCase_ , return_tensors='pt')
a : Tuple = inputs.pixel_values.to(UpperCAmelCase_)
# forward pass to make sure inference works in fp16
with torch.no_grad():
a : Tuple = model(UpperCAmelCase_)
| 345 | 1 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any]=2 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : int=1_0 , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : int=3_2 * 4 , UpperCAmelCase_ : List[Any]=3_2 * 6 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : List[Any]=3_2 , ):
"""simple docstring"""
a : Optional[int] = parent
a : Optional[Any] = batch_size
a : Tuple = is_training
a : str = use_auxiliary_loss
a : Any = num_queries
a : Dict = num_channels
a : Any = min_size
a : str = max_size
a : List[str] = num_labels
a : int = mask_feature_size
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size]).to(
UpperCAmelCase_)
a : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] , device=UpperCAmelCase_)
a : Dict = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=UpperCAmelCase_) > 0.5
).float()
a : Optional[int] = (torch.rand((self.batch_size, self.num_labels) , device=UpperCAmelCase_) > 0.5).long()
a : int = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a , a , a , a , a : List[Any] = self.prepare_config_and_inputs()
a : Optional[int] = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple):
"""simple docstring"""
a : List[str] = output.encoder_hidden_states
a : Optional[int] = output.pixel_decoder_hidden_states
a : int = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(UpperCAmelCase_) , len(config.backbone_config.depths))
self.parent.assertTrue(len(UpperCAmelCase_) , len(config.backbone_config.depths))
self.parent.assertTrue(len(UpperCAmelCase_) , config.decoder_config.decoder_layers)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any]=False):
"""simple docstring"""
with torch.no_grad():
a : Tuple = MaskFormerModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Any = model(pixel_values=UpperCAmelCase_ , pixel_mask=UpperCAmelCase_)
a : Dict = model(UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_)
# the correct shape of output.transformer_decoder_hidden_states ensures the correctness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden states exist
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(output.encoder_last_hidden_state is not None)
if output_hidden_states:
self.check_output_hidden_state(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any):
"""simple docstring"""
a : str = MaskFormerForInstanceSegmentation(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
def comm_check_on_output(UpperCAmelCase_ : List[Any]):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None)
self.parent.assertTrue(result.encoder_last_hidden_state is not None)
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
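# editor's note (illustrative worked example): with the tester defaults above (min_size=128, max_size=192), the mask logits have shape (batch, queries, 32, 48)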
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1))
with torch.no_grad():
a : Tuple = model(pixel_values=UpperCAmelCase_ , pixel_mask=UpperCAmelCase_)
a : Any = model(UpperCAmelCase_)
comm_check_on_output(UpperCAmelCase_)
a : Optional[int] = model(
pixel_values=UpperCAmelCase_ , pixel_mask=UpperCAmelCase_ , mask_labels=UpperCAmelCase_ , class_labels=UpperCAmelCase_)
comm_check_on_output(UpperCAmelCase_)
self.parent.assertTrue(result.loss is not None)
self.parent.assertEqual(result.loss.shape , torch.Size([1]))
@require_torch
class UpperCamelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : str = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
A : Optional[int] = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
A : int = False
A : Tuple = False
A : List[str] = False
A : Optional[int] = False
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Dict = MaskFormerModelTester(self)
a : Optional[int] = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a , a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCAmelCase_ , **UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*UpperCAmelCase_)
@unittest.skip(reason='MaskFormer does not use inputs_embeds')
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormer does not have a get_input_embeddings method')
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormer is not a generative model')
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
pass
@unittest.skip(reason='MaskFormer does not use token embeddings')
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
pass
@require_torch_multi_gpu
@unittest.skip(
reason='MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`')
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.')
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a , a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : int = model_class(UpperCAmelCase_)
a : Any = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : Dict = [*signature.parameters.keys()]
a : int = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
for model_name in ["facebook/maskformer-swin-small-coco"]:
a : Optional[int] = MaskFormerModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Optional[Any] = (self.model_tester.min_size,) * 2
a : int = {
'pixel_values': torch.randn((2, 3, *size) , device=UpperCAmelCase_),
'mask_labels': torch.randn((2, 1_0, *size) , device=UpperCAmelCase_),
'class_labels': torch.zeros(2 , 1_0 , device=UpperCAmelCase_).long(),
}
a : Dict = MaskFormerForInstanceSegmentation(MaskFormerConfig()).to(UpperCAmelCase_)
a : Optional[int] = model(**UpperCAmelCase_)
self.assertTrue(outputs.loss is not None)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a , a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(UpperCAmelCase_ , **UpperCAmelCase_ , output_hidden_states=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a , a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : int = model_class(UpperCAmelCase_).to(UpperCAmelCase_)
a : List[str] = model(**UpperCAmelCase_ , output_attentions=UpperCAmelCase_)
self.assertTrue(outputs.attentions is not None)
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
a : Union[str, Any] = self.all_model_classes[1]
a , a , a , a , a : Optional[int] = self.model_tester.prepare_config_and_inputs()
a : Union[str, Any] = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.train()
a : Optional[int] = model(UpperCAmelCase_ , mask_labels=UpperCAmelCase_ , class_labels=UpperCAmelCase_).loss
loss.backward()
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Union[str, Any] = self.all_model_classes[1]
a , a , a , a , a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
a : int = True
a : Any = True
a : int = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.train()
a : List[str] = model(UpperCAmelCase_ , mask_labels=UpperCAmelCase_ , class_labels=UpperCAmelCase_)
a : Tuple = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
a : Optional[int] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we set requires_grad=True on inputs_embeds (line 2152); the original implementation doesn't
a : List[Any] = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
a : int = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=UpperCAmelCase_)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(pixel_decoder_hidden_states.grad)
self.assertIsNotNone(transformer_decoder_hidden_states.grad)
self.assertIsNotNone(attentions.grad)
UpperCamelCase : str = 1E-4
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
"""simple docstring"""
a : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
return (
MaskFormerImageProcessor.from_pretrained('facebook/maskformer-swin-small-coco')
if is_vision_available()
else None
)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : str = MaskFormerModel.from_pretrained('facebook/maskformer-swin-small-coco').to(UpperCAmelCase_)
a : Tuple = self.default_image_processor
a : Optional[int] = prepare_img()
a : int = image_processor(UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
a : Dict = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0)
# check size
self.assertEqual(UpperCAmelCase_ , (1, 3, 8_0_0, 1_0_8_8))
with torch.no_grad():
a : Optional[Any] = model(**UpperCAmelCase_)
a : Optional[int] = torch.tensor(
[[-0.04_82, 0.92_28, 0.49_51], [-0.25_47, 0.80_17, 0.85_27], [-0.00_69, 0.33_85, -0.00_89]]).to(UpperCAmelCase_)
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_))
a : Dict = torch.tensor(
[[-0.84_22, -0.84_34, -0.97_18], [-1.01_44, -0.55_65, -0.41_95], [-1.00_38, -0.44_84, -0.19_61]]).to(UpperCAmelCase_)
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_))
a : Optional[int] = torch.tensor(
[[0.28_52, -0.01_59, 0.97_35], [0.62_54, 0.18_58, 0.85_29], [-0.06_80, -0.41_16, 1.84_13]]).to(UpperCAmelCase_)
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : Optional[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco')
.to(UpperCAmelCase_)
.eval()
)
a : Dict = self.default_image_processor
a : Optional[Any] = prepare_img()
a : List[Any] = image_processor(UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
a : List[str] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0)
# check size
self.assertEqual(UpperCAmelCase_ , (1, 3, 8_0_0, 1_0_8_8))
with torch.no_grad():
a : str = model(**UpperCAmelCase_)
# masks_queries_logits
a : List[str] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
a : Dict = [
[-1.3_73_71_24, -1.7_72_49_37, -1.9_36_42_33],
[-1.5_97_72_81, -1.9_86_79_39, -2.1_52_36_95],
[-1.5_79_53_98, -1.9_26_98_32, -2.09_39_42],
]
a : Any = torch.tensor(UpperCAmelCase_).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_))
# class_queries_logits
a : Tuple = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
a : Dict = torch.tensor(
[
[1.6_512e00, -5.2_572e00, -3.3_519e00],
[3.6_169e-02, -5.9_025e00, -2.9_313e00],
[1.0_766e-04, -7.7_630e00, -5.1_263e00],
]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[Any] = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-resnet101-coco-stuff')
.to(UpperCAmelCase_)
.eval()
)
a : List[Any] = self.default_image_processor
a : str = prepare_img()
a : Union[str, Any] = image_processor(UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
a : Union[str, Any] = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0)
# check size
self.assertEqual(UpperCAmelCase_ , (1, 3, 8_0_0, 1_0_8_8))
with torch.no_grad():
a : Optional[int] = model(**UpperCAmelCase_)
# masks_queries_logits
a : Any = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
a : str = [[-0.90_46, -2.63_66, -4.60_62], [-3.41_79, -5.78_90, -8.80_57], [-4.91_79, -7.65_60, -10.77_11]]
a : Dict = torch.tensor(UpperCAmelCase_).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_))
# class_queries_logits
a : Optional[int] = outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1))
a : List[str] = torch.tensor(
[[4.71_88, -3.25_85, -2.88_57], [6.68_71, -2.91_81, -1.24_87], [7.24_49, -2.27_64, -2.18_74]]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , UpperCAmelCase_ , atol=UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : Tuple = (
MaskFormerForInstanceSegmentation.from_pretrained('facebook/maskformer-swin-small-coco')
.to(UpperCAmelCase_)
.eval()
)
a : Optional[Any] = self.default_image_processor
a : Dict = image_processor(
[np.zeros((3, 8_0_0, 1_3_3_3)), np.zeros((3, 8_0_0, 1_3_3_3))] , segmentation_maps=[np.zeros((3_8_4, 3_8_4)).astype(np.floataa), np.zeros((3_8_4, 3_8_4)).astype(np.floataa)] , return_tensors='pt' , )
a : List[str] = inputs['pixel_values'].to(UpperCAmelCase_)
a : int = [el.to(UpperCAmelCase_) for el in inputs['mask_labels']]
a : Tuple = [el.to(UpperCAmelCase_) for el in inputs['class_labels']]
with torch.no_grad():
a : Any = model(**UpperCAmelCase_)
self.assertTrue(outputs.loss is not None)
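# The assertions above encode two MaskFormer conventions: the image processor pads
# inputs so height and width are multiples of 32, and the mask logits come out at
# 1/4 of the padded resolution. A standalone restatement of that shape rule
# (100 queries is an assumption here; the real value is
# model.config.decoder_config.num_queries):
def expected_mask_logits_shape(batch: int, num_queries: int, height: int, width: int) -> tuple:
    assert height % 32 == 0 and width % 32 == 0, "inputs are padded to multiples of 32"
    return (batch, num_queries, height // 4, width // 4)

assert expected_mask_logits_shape(1, 100, 800, 1088) == (1, 100, 200, 272)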
| 345 | '''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : List[str] = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
A : List[Any] = "CIDAS/clipseg-rd64-refined"
A : Optional[Any] = "image_segmenter"
A : List[Any] = CLIPSegForImageSegmentation
A : Tuple = ["image", "text"]
A : Optional[int] = ["image"]
def __init__( self : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(self , ['vision'])
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : "Image" , UpperCAmelCase_ : str):
"""simple docstring"""
return self.pre_processor(text=[label] , images=[image] , padding=UpperCAmelCase_ , return_tensors='pt')
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : str):
"""simple docstring"""
with torch.no_grad():
a : Union[str, Any] = self.model(**UpperCAmelCase_).logits
return logits
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : int):
"""simple docstring"""
a : int = outputs.cpu().detach().numpy()
a : str = 0
a : str = 1
return Image.fromarray((array * 2_5_5).astype(np.uinta))
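# Minimal, self-contained sketch of the decode step above, assuming the CLIPSeg
# logits are a 2-D torch tensor and that positive logits mark pixels belonging to
# the queried label (the thresholding is spelled out explicitly here):
import numpy as np
import torch
from PIL import Image

def logits_to_mask(logits: torch.Tensor) -> Image.Image:
    array = logits.cpu().detach().numpy()
    array[array <= 0] = 0  # background pixels
    array[array > 0] = 1   # pixels assigned to the label
    return Image.fromarray((array * 255).astype(np.uint8))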
| 345 | 1 |
'''simple docstring'''
from sklearn.metrics import fa_score
import datasets
UpperCamelCase : str = """
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
"""
UpperCamelCase : Any = """
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
 sample_weight (`list` of `float`): Sample weights. Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{'f1': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results['f1'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric(\"f1\")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results['f1'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")
>>> print(round(results['f1'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")
>>> print(round(results['f1'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'f1': array([0.8, 0. , 0. ])}
"""
UpperCamelCase : str = """
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('int32')),
'references': datasets.Sequence(datasets.Value('int32')),
}
if self.config_name == 'multilabel'
else {
'predictions': datasets.Value('int32'),
'references': datasets.Value('int32'),
}) , reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Tuple=1 , UpperCAmelCase_ : Any="binary" , UpperCAmelCase_ : str=None):
"""simple docstring"""
a : str = fa_score(
UpperCAmelCase_ , UpperCAmelCase_ , labels=UpperCAmelCase_ , pos_label=UpperCAmelCase_ , average=UpperCAmelCase_ , sample_weight=UpperCAmelCase_)
return {"f1": float(UpperCAmelCase_) if score.size == 1 else score}
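# The Example 1 result above ({'f1': 0.5}) can be reproduced by hand from the
# harmonic-mean formula F1 = 2 * (precision * recall) / (precision + recall):
references = [0, 1, 0, 1, 0]
predictions = [0, 0, 1, 1, 0]
tp = sum(p == 1 and r == 1 for p, r in zip(predictions, references))  # 1
fp = sum(p == 1 and r == 0 for p, r in zip(predictions, references))  # 1
fn = sum(p == 0 and r == 1 for p, r in zip(predictions, references))  # 1
precision = tp / (tp + fp)  # 0.5
recall = tp / (tp + fn)     # 0.5
print(2 * precision * recall / (precision + recall))  # 0.5, matching the metric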
| 345 | '''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
A : Union[str, Any] = CTRLTokenizer
A : List[Any] = False
A : Optional[Any] = False
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a : Dict = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
a : Tuple = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
a : Any = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
a : List[Any] = {'unk_token': '<unk>'}
a : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(UpperCAmelCase_) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , **UpperCAmelCase_ : Dict):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return CTRLTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Any):
"""simple docstring"""
a : List[str] = 'adapt react readapt apt'
a : int = 'adapt react readapt apt'
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : int = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
a : str = 'adapt react readapt apt'
a : Optional[Any] = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
a : List[Any] = tokenizer.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
a : Union[str, Any] = tokens + [tokenizer.unk_token]
a : Any = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , UpperCAmelCase_)
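# What the id assertion above expects, spelled out: the toy vocab
# ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>'] maps tokens to ids 0..6,
# so the tokenized string plus the unk token becomes [0, 1, 2, 4, 5, 1, 0, 3, 6].
vocab = {'adapt': 0, 're@@': 1, 'a@@': 2, 'apt': 3, 'c@@': 4, 't': 5, '<unk>': 6}
tokens = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split() + ['<unk>']
assert [vocab[tok] for tok in tokens] == [0, 1, 2, 4, 5, 1, 0, 3, 6]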
| 345 | 1 |
'''simple docstring'''
def binomial_coefficient( n : int , k : int ) -> int:
    """simple docstring"""
    result = 1  # To keep the calculated value
    # Since C(n, k) = C(n, n-k)
    if k > (n - k):
        k = n - k
    # Calculate C(n,k)
    for i in range(k ):
        result *= n - i
        result //= i + 1
    return result
def catalan_number( node_count : int ) -> int:
    """simple docstring"""
    return binomial_coefficient(2 * node_count , node_count ) // (node_count + 1)
def factorial( n : int ) -> int:
    """simple docstring"""
    if n < 0:
        raise ValueError('factorial() not defined for negative values' )
    result = 1
    for i in range(1 , n + 1 ):
        result *= i
    return result
def binary_tree_count( node_count : int ) -> int:
    """simple docstring"""
    return catalan_number(node_count ) * factorial(node_count )
if __name__ == "__main__":
    node_count = int(input("""Enter the number of nodes: """).strip() or 0)
if node_count <= 0:
raise ValueError("""We need some nodes to work with.""")
print(
f'''Given {node_count} nodes, there are {binary_tree_count(node_count)} '''
f'''binary trees and {catalan_number(node_count)} binary search trees.'''
)
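# Worked example for node_count = 3 (assuming the functions above are importable
# under the names used here): C(6, 3) = 20, so the Catalan number is 20 // 4 = 5
# binary search trees, and 5 * 3! = 30 distinct binary trees.
assert binomial_coefficient(6, 3) == 20
assert catalan_number(3) == 5
assert binary_tree_count(3) == 30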
| 345 | '''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( numa : int , numb : int ) -> bool:
    """simple docstring"""
    # True exactly when the two integers have opposite signs: in two's complement
    # the XOR of the operands is negative iff their sign bits differ.
    return numa ^ numb < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
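# Spot checks of the XOR sign trick (pure arithmetic, no imports needed):
assert (3 ^ -7) < 0           # opposite signs -> XOR is negative
assert not ((-3) ^ (-7) < 0)  # both negative -> XOR is non-negative
assert not (3 ^ 7 < 0)        # both positive -> XOR is non-negative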
| 345 | 1 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 345 | '''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase : int = logging.get_logger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Dict = ["pixel_values"]
def __init__( self : Optional[Any] , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : List[str] = size if size is not None else {'shortest_edge': 2_2_4}
a : str = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
a : int = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Any = do_resize
a : Dict = size
a : Optional[Any] = resample
a : List[Any] = do_center_crop
a : List[Any] = crop_size
a : Optional[Any] = do_rescale
a : Dict = rescale_factor
a : Tuple = do_normalize
a : int = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
a : Optional[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Optional[Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
a : int = int((2_5_6 / 2_2_4) * size['shortest_edge'])
a : Optional[int] = get_resize_output_image_size(UpperCAmelCase_ , size=UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : Optional[Any] = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""")
return resize(
UpperCAmelCase_ , size=(size_dict['height'], size_dict['width']) , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
a : str = get_size_dict(UpperCAmelCase_)
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""")
return center_crop(UpperCAmelCase_ , size=(size['height'], size['width']) , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[int, float] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , UpperCAmelCase_ : Optional[TensorType] = None , UpperCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : int = do_resize if do_resize is not None else self.do_resize
a : Optional[int] = resample if resample is not None else self.resample
a : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
a : Tuple = do_rescale if do_rescale is not None else self.do_rescale
a : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
a : Dict = do_normalize if do_normalize is not None else self.do_normalize
a : Tuple = image_mean if image_mean is not None else self.image_mean
a : int = image_std if image_std is not None else self.image_std
a : Optional[int] = size if size is not None else self.size
a : Optional[Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : List[Any] = crop_size if crop_size is not None else self.crop_size
a : str = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Dict = make_list_of_images(UpperCAmelCase_)
if not valid_images(UpperCAmelCase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a : Any = [to_numpy_array(UpperCAmelCase_) for image in images]
if do_resize:
a : Optional[int] = [self.resize(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_center_crop:
a : int = [self.center_crop(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_rescale:
a : Any = [self.rescale(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_normalize:
a : str = [self.normalize(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : Optional[int] = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : Optional[int] = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_)
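# Worked example of the resize rule above: with size={"shortest_edge": 224} the
# shorter image side is first scaled to int(256 / 224 * 224) = 256 (the classic
# "resize to 256, then centre-crop to 224" recipe); the crop step then takes the
# central 224x224 window.
shortest_edge = 224
resize_target = int((256 / 224) * shortest_edge)
print(resize_target)  # 256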
| 345 | 1 |
'''simple docstring'''
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Union[str, Any] = mock.Mock()
a : Tuple = 5_0_0
a : Union[str, Any] = {}
a : List[str] = HTTPError
a : int = {}
# Download this model to make sure it's in the cache.
a : List[str] = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=UpperCAmelCase_) as mock_head:
a : Any = BertTokenizer.from_pretrained('hf-internal-testing/tiny-random-bert')
            # This checks that we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Tuple = mock.Mock()
a : Any = 5_0_0
a : Tuple = {}
a : str = HTTPError
a : Optional[Any] = {}
# Download this model to make sure it's in the cache.
a : Any = GPTaTokenizerFast.from_pretrained('gpt2')
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch('requests.Session.request' , return_value=UpperCAmelCase_) as mock_head:
a : Tuple = GPTaTokenizerFast.from_pretrained('gpt2')
            # This checks that we did call the fake head request
mock_head.assert_called()
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
try:
a : Optional[int] = tempfile.mktemp()
with open(UpperCAmelCase_ , 'wb') as f:
http_get('https://huggingface.co/albert-base-v1/resolve/main/spiece.model' , UpperCAmelCase_)
a : List[str] = AlbertTokenizer.from_pretrained(UpperCAmelCase_)
finally:
os.remove(UpperCAmelCase_)
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile('tokenizer.json'):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open('tokenizer.json' , 'wb') as f:
http_get('https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json' , UpperCAmelCase_)
a : Dict = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-random-gpt2')
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 has a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_0_0_0)
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove('tokenizer.json')
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : Optional[Any] = AlbertTokenizer.from_pretrained('https://huggingface.co/albert-base-v1/resolve/main/spiece.model')
@is_staging_test
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
A : int = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any):
"""simple docstring"""
a : str = TOKEN
HfFolder.save_token(UpperCAmelCase_)
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str]):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id='test-tokenizer')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-tokenizer-org')
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-tokenizer')
except HTTPError:
pass
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
a : Optional[Any] = os.path.join(UpperCAmelCase_ , 'vocab.txt')
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
a : int = BertTokenizer(UpperCAmelCase_)
tokenizer.push_to_hub('test-tokenizer' , use_auth_token=self._token)
a : Optional[int] = BertTokenizer.from_pretrained(f"""{USER}/test-tokenizer""")
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
# Reset repo
delete_repo(token=self._token , repo_id='test-tokenizer')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(UpperCAmelCase_ , repo_id='test-tokenizer' , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token)
a : Union[str, Any] = BertTokenizer.from_pretrained(f"""{USER}/test-tokenizer""")
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
a : Any = os.path.join(UpperCAmelCase_ , 'vocab.txt')
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
a : str = BertTokenizer(UpperCAmelCase_)
tokenizer.push_to_hub('valid_org/test-tokenizer-org' , use_auth_token=self._token)
a : int = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-tokenizer-org')
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
UpperCAmelCase_ , repo_id='valid_org/test-tokenizer-org' , push_to_hub=UpperCAmelCase_ , use_auth_token=self._token)
a : List[Any] = BertTokenizer.from_pretrained('valid_org/test-tokenizer-org')
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab)
@require_tokenizers
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
a : Any = os.path.join(UpperCAmelCase_ , 'vocab.txt')
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
a : int = CustomTokenizer(UpperCAmelCase_)
# No fast custom tokenizer
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token)
a : Union[str, Any] = AutoTokenizer.from_pretrained(f"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=UpperCAmelCase_)
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer')
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
a : Union[str, Any] = os.path.join(UpperCAmelCase_ , 'vocab.txt')
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens]))
a : Tuple = BertTokenizerFast.from_pretrained(UpperCAmelCase_)
bert_tokenizer.save_pretrained(UpperCAmelCase_)
a : List[str] = CustomTokenizerFast.from_pretrained(UpperCAmelCase_)
tokenizer.push_to_hub('test-dynamic-tokenizer' , use_auth_token=self._token)
a : Any = AutoTokenizer.from_pretrained(f"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=UpperCAmelCase_)
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizerFast')
a : Union[str, Any] = AutoTokenizer.from_pretrained(
f"""{USER}/test-dynamic-tokenizer""" , use_fast=UpperCAmelCase_ , trust_remote_code=UpperCAmelCase_)
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , 'CustomTokenizer')
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Dict = Trie()
trie.add('Hello 友達')
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {' ': {'友': {'達': {'': 1}}}}}}}}})
trie.add('Hello')
trie.data
self.assertEqual(trie.data , {'H': {'e': {'l': {'l': {'o': {'': 1, ' ': {'友': {'達': {'': 1}}}}}}}}})
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : str = Trie()
self.assertEqual(trie.split('[CLS] This is a extra_id_100') , ['[CLS] This is a extra_id_100'])
trie.add('[CLS]')
trie.add('extra_id_1')
trie.add('extra_id_100')
self.assertEqual(trie.split('[CLS] This is a extra_id_100') , ['[CLS]', ' This is a ', 'extra_id_100'])
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : List[Any] = Trie()
trie.add('A')
self.assertEqual(trie.split('ABC') , ['A', 'BC'])
self.assertEqual(trie.split('BCA') , ['BC', 'A'])
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Optional[Any] = Trie()
trie.add('TOKEN]')
trie.add('[SPECIAL_TOKEN]')
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]') , ['This is something ', '[SPECIAL_TOKEN]'])
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : List[str] = Trie()
trie.add('A')
trie.add('P')
trie.add('[SPECIAL_TOKEN]')
self.assertEqual(trie.split('This is something [SPECIAL_TOKEN]') , ['This is something ', '[SPECIAL_TOKEN]'])
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : int = Trie()
trie.add('AB')
trie.add('B')
trie.add('C')
self.assertEqual(trie.split('ABC') , ['AB', 'C'])
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Tuple = Trie()
trie.add('ABC')
trie.add('B')
trie.add('CD')
self.assertEqual(trie.split('ABCD') , ['ABC', 'D'])
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : Dict = Trie()
a : Dict = trie.cut_text('ABC' , [0, 0, 2, 1, 2, 3])
self.assertEqual(UpperCAmelCase_ , ['AB', 'C'])
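# The Trie above is what slow tokenizers use to cut special tokens out of raw
# text before regular tokenization; a minimal standalone reproduction of one of
# the tests above (longest match wins at each position):
from transformers.tokenization_utils import Trie

trie = Trie()
trie.add('AB')
trie.add('B')
trie.add('C')
print(trie.split('ABC'))  # ['AB', 'C']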
| 345 | '''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : float | Decimal , snake_case : float = 10**-10 ) -> float:
"""simple docstring"""
a : Dict = a
while True:
a : Any = Decimal(snake_case ) - (
Decimal(eval(snake_case ) ) / Decimal(eval(str(diff(snake_case ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(snake_case ) ) < precision: # noqa: S307
return float(snake_case )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
# Find Square Root of 5
print(f'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
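# One hand-rolled Newton-Raphson step for f(x) = x**2 - 5 starting from x0 = 2
# (no eval/sympy needed): x1 = x0 - f(x0) / f'(x0) = 2 - (-1) / 4 = 2.25, and a
# few more iterations converge to sqrt(5) ~ 2.2360679...
def newton_step(x: float) -> float:
    return x - (x * x - 5) / (2 * x)

x = 2.0
for _ in range(4):
    x = newton_step(x)
print(x)  # ~2.23606797749979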
| 345 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase : Optional[int] = logging.get_logger(__name__)
UpperCamelCase : int = {
"""facebook/deit-base-distilled-patch16-224""": (
"""https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json"""
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : int = "deit"
def __init__( self : Any , UpperCAmelCase_ : Optional[int]=7_6_8 , UpperCAmelCase_ : Tuple=1_2 , UpperCAmelCase_ : List[str]=1_2 , UpperCAmelCase_ : Union[str, Any]=3_0_7_2 , UpperCAmelCase_ : int="gelu" , UpperCAmelCase_ : Optional[Any]=0.0 , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : Dict=1e-12 , UpperCAmelCase_ : str=2_2_4 , UpperCAmelCase_ : List[str]=1_6 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : Optional[Any]=1_6 , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : List[Any] = hidden_size
a : Union[str, Any] = num_hidden_layers
a : Optional[Any] = num_attention_heads
a : Union[str, Any] = intermediate_size
a : Optional[Any] = hidden_act
a : str = hidden_dropout_prob
a : Union[str, Any] = attention_probs_dropout_prob
a : Dict = initializer_range
a : Optional[Any] = layer_norm_eps
a : Dict = image_size
a : int = patch_size
a : List[Any] = num_channels
a : Optional[int] = qkv_bias
a : str = encoder_stride
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Tuple = version.parse("1.11" )
@property
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
@property
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return 1e-4
| 345 | '''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int]=1_3 , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Optional[Any]=9_9 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : int=3_2 , UpperCAmelCase_ : Tuple=5 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Optional[int]=5_1_2 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : int="last" , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : List[str]=0 , ):
"""simple docstring"""
a : Tuple = parent
a : Optional[Any] = batch_size
a : Tuple = seq_length
a : Union[str, Any] = is_training
a : List[str] = use_input_lengths
a : Union[str, Any] = use_token_type_ids
a : Optional[int] = use_labels
a : int = gelu_activation
a : Dict = sinusoidal_embeddings
a : Any = causal
a : Optional[int] = asm
a : int = n_langs
a : List[str] = vocab_size
a : List[str] = n_special
a : List[str] = hidden_size
a : Any = num_hidden_layers
a : Union[str, Any] = num_attention_heads
a : Optional[Any] = hidden_dropout_prob
a : str = attention_probs_dropout_prob
a : Dict = max_position_embeddings
a : Union[str, Any] = type_sequence_label_size
a : str = initializer_range
a : List[Any] = num_labels
a : Union[str, Any] = num_choices
a : Optional[Any] = summary_type
a : Optional[Any] = use_proj
a : Optional[Any] = scope
a : Dict = bos_token_id
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
a : Optional[int] = None
if self.use_input_lengths:
a : Optional[int] = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
a : int = None
if self.use_token_type_ids:
a : str = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
a : Optional[Any] = None
a : Tuple = None
a : Optional[Any] = None
if self.use_labels:
a : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a : Optional[Any] = ids_tensor([self.batch_size] , 2).float()
a : Dict = ids_tensor([self.batch_size] , self.num_choices)
a : Any = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : Any = XLMModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_ , lengths=UpperCAmelCase_ , langs=UpperCAmelCase_)
a : str = model(UpperCAmelCase_ , langs=UpperCAmelCase_)
a : int = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
a : Optional[Any] = XLMWithLMHeadModel(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : Union[str, Any] = XLMForQuestionAnsweringSimple(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = model(UpperCAmelCase_)
a : Tuple = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_)
a : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Any = XLMForQuestionAnswering(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_)
a : Dict = model(
UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , p_mask=UpperCAmelCase_ , )
a : int = model(
UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , )
((a) , ) : Union[str, Any] = result_with_labels.to_tuple()
a : int = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_)
((a) , ) : Union[str, Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , ):
"""simple docstring"""
a : Dict = XLMForSequenceClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_)
a : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Dict = self.num_labels
a : int = XLMForTokenClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , ):
"""simple docstring"""
a : str = self.num_choices
a : Dict = XLMForMultipleChoice(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : str = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Optional[int] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Any = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : int = self.prepare_config_and_inputs()
(
(
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) ,
) : Union[str, Any] = config_and_inputs
a : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : int = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
A : List[str] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
A : Optional[Any] = (
{
"feature-extraction": XLMModel,
"fill-mask": XLMWithLMHeadModel,
"question-answering": XLMForQuestionAnsweringSimple,
"text-classification": XLMForSequenceClassification,
"text-generation": XLMWithLMHeadModel,
"token-classification": XLMForTokenClassification,
"zero-shot": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
            # `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict=False):
"""simple docstring"""
a : List[Any] = super()._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
a : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
a : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[Any] = XLMModelTester(self)
a : Tuple = ConfigTester(self , config_class=UpperCAmelCase_ , emb_dim=3_7)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=1):
"""simple docstring"""
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(
[isinstance(UpperCAmelCase_ , UpperCAmelCase_) for iter_attentions in attentions] , [True] * len(UpperCAmelCase_))
self.assertEqual(len(UpperCAmelCase_) , (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(UpperCAmelCase_):
# adds PAD dummy token
a : List[str] = min_length + idx + 1
a : Optional[Any] = min_length + idx + 1
a : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Union[str, Any]=1):
"""simple docstring"""
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(
[isinstance(UpperCAmelCase_ , UpperCAmelCase_) for iter_hidden_states in hidden_states] , [True] * len(UpperCAmelCase_) , )
self.assertEqual(len(UpperCAmelCase_) , (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(UpperCAmelCase_):
# adds PAD dummy token
a : int = min_length + idx + 1
a : Any = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(UpperCAmelCase_) , )
pass
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Union[str, Any] = XLMModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
@require_torch
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Dict = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
model.to(UpperCAmelCase_)
a : List[Any] = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=UpperCAmelCase_) # the president
a : Dict = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
a : Optional[Any] = model.generate(UpperCAmelCase_ , do_sample=UpperCAmelCase_)
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , UpperCAmelCase_)
| 345 | 1 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def SCREAMING_SNAKE_CASE__ ( snake_case : NDArray[floataa] , snake_case : NDArray[floataa] , snake_case : list[int] , snake_case : int , ) -> list[float]:
"""simple docstring"""
a , a : Union[str, Any] = coefficient_matrix.shape
a , a : int = constant_matrix.shape
if rowsa != colsa:
a : Union[str, Any] = F"""Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}"""
raise ValueError(snake_case )
if colsa != 1:
a : Union[str, Any] = F"""Constant matrix must be nx1 but received {rowsa}x{colsa}"""
raise ValueError(snake_case )
if rowsa != rowsa:
a : Dict = (
'Coefficient and constant matrices dimensions must be nxn and nx1 but '
F"""received {rowsa}x{colsa} and {rowsa}x{colsa}"""
)
raise ValueError(snake_case )
if len(snake_case ) != rowsa:
a : Union[str, Any] = (
'Number of initial values must be equal to number of rows in coefficient '
F"""matrix but received {len(snake_case )} and {rowsa}"""
)
raise ValueError(snake_case )
if iterations <= 0:
raise ValueError('Iterations must be at least 1' )
a : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
a , a : str = table.shape
strictly_diagonally_dominant(snake_case )
# Iterates the whole matrix for given number of times
for _ in range(snake_case ):
a : Optional[int] = []
for row in range(snake_case ):
a : str = 0
for col in range(snake_case ):
if col == row:
a : int = table[row][col]
elif col == cols - 1:
a : Optional[int] = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
a : List[str] = (temp + val) / denom
new_val.append(snake_case )
a : Union[str, Any] = new_val
return [float(snake_case ) for i in new_val]
def SCREAMING_SNAKE_CASE__ ( snake_case : NDArray[floataa] ) -> bool:
"""simple docstring"""
a , a : int = table.shape
a : Optional[int] = True
for i in range(0 , snake_case ):
a : Any = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError('Coefficient matrix is not strictly diagonally dominant' )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
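# Tiny worked example of the Jacobi update on a strictly diagonally dominant
# 2x2 system, 4x + y = 6 and x + 3y = 7 (exact solution x = 1, y = 2); each
# sweep only uses the previous iterate, as in the implementation above.
import numpy as np

A = np.array([[4.0, 1.0], [1.0, 3.0]])
b = np.array([6.0, 7.0])
x = np.zeros(2)  # initial guess
for _ in range(25):
    x = (b - (A @ x - np.diag(A) * x)) / np.diag(A)
print(np.round(x, 6))  # [1. 2.]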
| 345 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase : List[str] = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Any = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[int] = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 345 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase : Optional[int] = {
"""configuration_funnel""": ["""FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FunnelConfig"""],
"""convert_funnel_original_tf_checkpoint_to_pytorch""": [],
"""tokenization_funnel""": ["""FunnelTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[int] = ["""FunnelTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Union[str, Any] = [
"""FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FunnelBaseModel""",
"""FunnelForMaskedLM""",
"""FunnelForMultipleChoice""",
"""FunnelForPreTraining""",
"""FunnelForQuestionAnswering""",
"""FunnelForSequenceClassification""",
"""FunnelForTokenClassification""",
"""FunnelModel""",
"""FunnelPreTrainedModel""",
"""load_tf_weights_in_funnel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Any = [
"""TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFFunnelBaseModel""",
"""TFFunnelForMaskedLM""",
"""TFFunnelForMultipleChoice""",
"""TFFunnelForPreTraining""",
"""TFFunnelForQuestionAnswering""",
"""TFFunnelForSequenceClassification""",
"""TFFunnelForTokenClassification""",
"""TFFunnelModel""",
"""TFFunnelPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
UpperCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 345 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase : Tuple = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Union[str, Any] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
UpperCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 345 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : int = logging.get_logger(__name__)
UpperCamelCase : Optional[Any] = {
"""bigcode/gpt_bigcode-santacoder""": """https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json""",
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Optional[Any] = "gpt_bigcode"
A : str = ["past_key_values"]
A : Dict = {
"hidden_size": "n_embd",
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Any , UpperCAmelCase_ : Any=5_0_2_5_7 , UpperCAmelCase_ : Union[str, Any]=1_0_2_4 , UpperCAmelCase_ : Dict=7_6_8 , UpperCAmelCase_ : Optional[Any]=1_2 , UpperCAmelCase_ : Dict=1_2 , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Any="gelu_pytorch_tanh" , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Optional[int]=1e-5 , UpperCAmelCase_ : Optional[int]=0.02 , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Any=5_0_2_5_6 , UpperCAmelCase_ : Optional[int]=5_0_2_5_6 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Optional[Any]=True , **UpperCAmelCase_ : str , ):
"""simple docstring"""
a : Dict = vocab_size
a : str = n_positions
a : Any = n_embd
a : List[str] = n_layer
a : List[Any] = n_head
a : str = n_inner
a : Union[str, Any] = activation_function
a : List[Any] = resid_pdrop
a : Optional[Any] = embd_pdrop
a : int = attn_pdrop
a : List[str] = layer_norm_epsilon
a : Union[str, Any] = initializer_range
a : List[str] = scale_attn_weights
a : Any = use_cache
a : int = attention_softmax_in_fpaa
a : Tuple = scale_attention_softmax_in_fpaa
a : Union[str, Any] = multi_query
a : Dict = bos_token_id
a : Dict = eos_token_id
super().__init__(bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
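# Hedged illustration of what the `attribute_map` above provides: canonical names such as
# `hidden_size` resolve to the model-specific `n_embd` field. This is a simplified sketch of
# the idea, not the real PretrainedConfig machinery; the class and values below are made up.
class _AliasedConfigSketch:
    attribute_map = {"hidden_size": "n_embd", "num_hidden_layers": "n_layer"}

    def __init__(self, n_embd=768, n_layer=12):
        self.n_embd = n_embd
        self.n_layer = n_layer

    def __getattr__(self, name):
        # Only called when normal lookup fails, so aliases never shadow real attributes.
        alias_map = type(self).attribute_map
        if name in alias_map:
            return getattr(self, alias_map[name])
        raise AttributeError(name)


_cfg_sketch = _AliasedConfigSketch(n_embd=1024)
assert _cfg_sketch.hidden_size == 1024 and _cfg_sketch.num_hidden_layers == 12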
| 345 | '''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : str = logging.get_logger(__name__)
UpperCamelCase : List[str] = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Optional[int] = "data2vec-audio"
def __init__( self : Dict , UpperCAmelCase_ : Optional[int]=3_2 , UpperCAmelCase_ : Union[str, Any]=7_6_8 , UpperCAmelCase_ : Dict=1_2 , UpperCAmelCase_ : str=1_2 , UpperCAmelCase_ : Optional[Any]=3_0_7_2 , UpperCAmelCase_ : Dict="gelu" , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : str=1e-5 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Optional[Any]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCAmelCase_ : Dict=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_ : int=(1_0, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : List[Any]=1_6 , UpperCAmelCase_ : Optional[Any]=1_9 , UpperCAmelCase_ : int=5 , UpperCAmelCase_ : Any=0.05 , UpperCAmelCase_ : Dict=1_0 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : Tuple=1_0 , UpperCAmelCase_ : int=0 , UpperCAmelCase_ : Any="sum" , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[int]=2_5_6 , UpperCAmelCase_ : Any=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCAmelCase_ : Optional[Any]=(5, 3, 3, 1, 1) , UpperCAmelCase_ : Optional[int]=(1, 2, 3, 1, 1) , UpperCAmelCase_ : int=5_1_2 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : Tuple=1 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : List[Any]=None , **UpperCAmelCase_ : int , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_)
a : List[Any] = hidden_size
a : Any = feat_extract_activation
a : Any = list(UpperCAmelCase_)
a : Optional[int] = list(UpperCAmelCase_)
a : Dict = list(UpperCAmelCase_)
a : Tuple = conv_bias
a : str = num_conv_pos_embeddings
a : Dict = num_conv_pos_embedding_groups
a : Optional[Any] = conv_pos_kernel_size
a : Any = len(self.conv_dim)
a : Tuple = num_hidden_layers
a : Any = intermediate_size
a : Any = hidden_act
a : Dict = num_attention_heads
a : Dict = hidden_dropout
a : Union[str, Any] = attention_dropout
a : Dict = activation_dropout
a : Optional[int] = feat_proj_dropout
a : Tuple = final_dropout
a : Union[str, Any] = layerdrop
a : Tuple = layer_norm_eps
a : Dict = initializer_range
a : Tuple = vocab_size
a : int = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a : List[str] = mask_time_prob
a : int = mask_time_length
a : Optional[int] = mask_time_min_masks
a : Dict = mask_feature_prob
a : List[str] = mask_feature_length
a : str = mask_feature_min_masks
# ctc loss
a : str = ctc_loss_reduction
a : Optional[Any] = ctc_zero_infinity
# adapter
a : List[str] = add_adapter
a : Optional[Any] = adapter_kernel_size
a : int = adapter_stride
a : str = num_adapter_layers
a : Optional[Any] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
a : str = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
a : List[Any] = list(UpperCAmelCase_)
a : List[str] = list(UpperCAmelCase_)
a : str = list(UpperCAmelCase_)
a : Optional[Any] = xvector_output_dim
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
return math.prod(self.conv_stride)
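# Quick self-contained check (an illustrative addition, not part of the original module) of what
# the stride product computed above means in practice: the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2) downsamples raw audio by a factor of 320, so one second of 16 kHz audio
# yields roughly 50 encoder frames.
import math as _math_sketch

_default_conv_stride = (5, 2, 2, 2, 2, 2, 2)
_downsampling_factor = _math_sketch.prod(_default_conv_stride)
assert _downsampling_factor == 320
assert 16_000 // _downsampling_factor == 50  # approximate frames per second of 16 kHz audio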
| 345 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[Any] = ["sentencepiece"]
def __init__( self : int , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Any = ["sentencepiece"]
def __init__( self : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Dict = ["sentencepiece"]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Optional[int] = ["sentencepiece"]
def __init__( self : str , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Optional[int] = ["sentencepiece"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Union[str, Any] = ["sentencepiece"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[str] = ["sentencepiece"]
def __init__( self : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : int = ["sentencepiece"]
def __init__( self : Tuple , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : str = ["sentencepiece"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Optional[Any] = ["sentencepiece"]
def __init__( self : List[str] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Any = ["sentencepiece"]
def __init__( self : Tuple , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Any = ["sentencepiece"]
def __init__( self : Optional[int] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : str = ["sentencepiece"]
def __init__( self : Any , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Optional[Any] = ["sentencepiece"]
def __init__( self : List[str] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Optional[Any] = ["sentencepiece"]
def __init__( self : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Union[str, Any] = ["sentencepiece"]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : int = ["sentencepiece"]
def __init__( self : str , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Tuple):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[Any] = ["sentencepiece"]
def __init__( self : Dict , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[Any] = ["sentencepiece"]
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Any):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Union[str, Any] = ["sentencepiece"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[Any] = ["sentencepiece"]
def __init__( self : int , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Dict = ["sentencepiece"]
def __init__( self : List[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Optional[Any] = ["sentencepiece"]
def __init__( self : List[str] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[str] = ["sentencepiece"]
def __init__( self : Dict , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : int = ["sentencepiece"]
def __init__( self : List[str] , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Dict):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Optional[Any] = ["sentencepiece"]
def __init__( self : List[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : int):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : int = ["sentencepiece"]
def __init__( self : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : int = ["sentencepiece"]
def __init__( self : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Optional[Any] = ["sentencepiece"]
def __init__( self : List[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : Union[str, Any] = ["sentencepiece"]
def __init__( self : Tuple , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
class UpperCamelCase ( metaclass=a_ ):
"""simple docstring"""
A : List[str] = ["sentencepiece"]
def __init__( self : List[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
requires_backends(self , ['sentencepiece'])
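# Simplified sketch (an assumption, not the actual DummyObject metaclass used above) of the
# pattern these placeholders implement: importing the stand-in is free, and a helpful error is
# raised only when the missing backend is actually needed.
class _SentencePieceDummySketch:
    def __init__(self, *args, **kwargs):
        raise ImportError(
            "This object requires the sentencepiece library; install it with `pip install sentencepiece`."
        )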
| 345 | '''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
UpperCamelCase : Optional[Any] = logging.getLogger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Tuple = "masked_bert"
def __init__( self : Tuple , UpperCAmelCase_ : List[Any]=3_0_5_2_2 , UpperCAmelCase_ : str=7_6_8 , UpperCAmelCase_ : Optional[Any]=1_2 , UpperCAmelCase_ : Optional[int]=1_2 , UpperCAmelCase_ : Union[str, Any]=3_0_7_2 , UpperCAmelCase_ : Union[str, Any]="gelu" , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Optional[int]=5_1_2 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : Optional[Any]=1e-12 , UpperCAmelCase_ : Dict=0 , UpperCAmelCase_ : Dict="topK" , UpperCAmelCase_ : str="constant" , UpperCAmelCase_ : Optional[Any]=0.0 , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
a : Union[str, Any] = vocab_size
a : List[Any] = hidden_size
a : List[str] = num_hidden_layers
a : Any = num_attention_heads
a : Optional[Any] = hidden_act
a : str = intermediate_size
a : Dict = hidden_dropout_prob
a : Any = attention_probs_dropout_prob
a : Any = max_position_embeddings
a : Dict = type_vocab_size
a : List[str] = initializer_range
a : int = layer_norm_eps
a : Dict = pruning_method
a : List[str] = mask_init
a : Union[str, Any] = mask_scale
| 345 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase_ : int , ):
"""simple docstring"""
a : Optional[int] = parent
a : Dict = 1_3
a : int = 7
a : Optional[int] = True
a : Tuple = True
a : Optional[Any] = True
a : Optional[int] = 9_9
a : Tuple = 3_2
a : Any = 2
a : Optional[int] = 4
a : str = 3_7
a : str = 'gelu'
a : Any = 0.1
a : List[str] = 0.1
a : Optional[int] = 5_1_2
a : Union[str, Any] = 1_6
a : Optional[Any] = 2
a : Optional[Any] = 0.02
a : Dict = 3
a : Optional[int] = 4
a : Tuple = None
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a : Union[str, Any] = None
if self.use_input_mask:
a : Tuple = random_attention_mask([self.batch_size, self.seq_length])
a : int = None
a : List[str] = None
a : int = None
if self.use_labels:
a : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a : int = ids_tensor([self.batch_size] , self.num_choices)
a : Dict = EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str):
"""simple docstring"""
a : Optional[Any] = TFEsmModel(config=UpperCAmelCase_)
a : int = {'input_ids': input_ids, 'attention_mask': input_mask}
a : Dict = model(UpperCAmelCase_)
a : Union[str, Any] = [input_ids, input_mask]
a : Any = model(UpperCAmelCase_)
a : List[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
a : Tuple = True
a : Optional[Any] = TFEsmModel(config=UpperCAmelCase_)
a : str = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
a : Tuple = model(UpperCAmelCase_)
a : Union[str, Any] = [input_ids, input_mask]
a : str = model(UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_)
# Also check the case where encoder outputs are not passed
a : List[str] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : Optional[int] = TFEsmForMaskedLM(config=UpperCAmelCase_)
a : List[Any] = model([input_ids, input_mask])
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple):
"""simple docstring"""
a : Dict = self.num_labels
a : Dict = TFEsmForTokenClassification(config=UpperCAmelCase_)
a : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
a : List[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class UpperCamelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : Any = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
A : Tuple = (
{
"feature-extraction": TFEsmModel,
"fill-mask": TFEsmForMaskedLM,
"text-classification": TFEsmForSequenceClassification,
"token-classification": TFEsmForTokenClassification,
"zero-shot": TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
A : Dict = False
A : Dict = False
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : List[Any] = TFEsmModelTester(self)
a : int = ConfigTester(self , config_class=UpperCAmelCase_ , hidden_size=3_7)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : str = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : str = TFEsmModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
@unittest.skip('Protein models do not support embedding resizing.')
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
pass
@unittest.skip('Protein models do not support embedding resizing.')
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
        a , a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Any = model_class(UpperCAmelCase_)
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer)
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
a : Union[str, Any] = model.get_bias()
assert isinstance(UpperCAmelCase_ , UpperCAmelCase_)
for k, v in name.items():
assert isinstance(UpperCAmelCase_ , tf.Variable)
else:
a : int = model.get_output_embeddings()
assert x is None
a : Tuple = model.get_bias()
assert name is None
@require_tf
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Dict = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D')
a : Dict = tf.constant([[0, 1, 2, 3, 4, 5]])
a : List[str] = model(UpperCAmelCase_)[0]
a : Union[str, Any] = [1, 6, 3_3]
self.assertEqual(list(output.numpy().shape) , UpperCAmelCase_)
# compare the actual values for a slice.
a : Any = tf.constant(
[
[
[8.92_15_18, -10.58_98_14, -6.4_67_13_07],
[-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15],
[-7.78_12_47, -13.95_15_57, -3.74_05_92],
]
])
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-2))
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Union[str, Any] = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D')
a : Tuple = tf.constant([[0, 6, 4, 1_3, 5, 4, 1_6, 1_2, 1_1, 7, 2]])
a : Any = model(UpperCAmelCase_)[0]
# compare the actual values for a slice.
a : Dict = tf.constant(
[
[
[0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39],
[0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22],
[0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28],
]
])
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4))
| 345 | '''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Any):
"""simple docstring"""
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_)
a : str = {}
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Tuple , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : int):
"""simple docstring"""
a : Dict = super().add_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
if num_added_tokens == 0:
raise ValueError(
f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
' `placeholder_token` that is not already in the tokenizer.')
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , *UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str]=1 , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
a : Any = []
if num_vec_per_token == 1:
self.try_adding_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
output.append(UpperCAmelCase_)
else:
a : int = []
for i in range(UpperCAmelCase_):
a : Union[str, Any] = placeholder_token + f"""_{i}"""
self.try_adding_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
output.append(UpperCAmelCase_)
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
                raise ValueError(
                    f"""The tokenizer already has placeholder token {token} that can get confused with"""
                    f""" {placeholder_token}; keep placeholder tokens independent.""")
a : Any = output
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : str=1.0):
"""simple docstring"""
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
a : Any = []
for i in range(len(UpperCAmelCase_)):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=UpperCAmelCase_))
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
a : List[Any] = self.token_map[placeholder_token]
a : int = tokens[: 1 + int(len(UpperCAmelCase_) * prop_tokens_to_load)]
if vector_shuffle:
a : List[Any] = copy.copy(UpperCAmelCase_)
random.shuffle(UpperCAmelCase_)
a : List[str] = text.replace(UpperCAmelCase_ , ' '.join(UpperCAmelCase_))
return text
def __call__( self : Optional[int] , UpperCAmelCase_ : Any , *UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Optional[int]=1.0 , **UpperCAmelCase_ : str):
"""simple docstring"""
return super().__call__(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase_ , vector_shuffle=UpperCAmelCase_ , prop_tokens_to_load=UpperCAmelCase_) , *UpperCAmelCase_ , **UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , *UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=1.0 , **UpperCAmelCase_ : Dict):
"""simple docstring"""
return super().encode(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase_ , vector_shuffle=UpperCAmelCase_ , prop_tokens_to_load=UpperCAmelCase_) , *UpperCAmelCase_ , **UpperCAmelCase_ , )
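# Standalone sketch (assumed and simplified, separate from the tokenizer class above) of the
# multi-vector placeholder trick: one concept token is expanded into several numbered sub-tokens
# in the raw text before tokenization, optionally shuffled per call.
import random as _random_sketch


def _expand_placeholder_sketch(text, token_map, vector_shuffle=False):
    # token_map example: {"<cat-toy>": ["<cat-toy>_0", "<cat-toy>_1", "<cat-toy>_2"]}
    for placeholder, sub_tokens in token_map.items():
        if placeholder in text:
            sub_tokens = list(sub_tokens)
            if vector_shuffle:
                _random_sketch.shuffle(sub_tokens)
            text = text.replace(placeholder, " ".join(sub_tokens))
    return text


assert _expand_placeholder_sketch(
    "a photo of <cat-toy>", {"<cat-toy>": ["<cat-toy>_0", "<cat-toy>_1"]}
) == "a photo of <cat-toy>_0 <cat-toy>_1"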
| 345 | 1 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
UpperCamelCase : List[Any] = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
UpperCamelCase : Any = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
UpperCamelCase : List[str] = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types()) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
] , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('float')),
"references": datasets.Sequence(datasets.Value('float')),
}
else:
return {
"predictions": datasets.Value('float'),
"references": datasets.Value('float'),
}
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Dict="uniform_average" , UpperCAmelCase_ : Optional[Any]=True):
"""simple docstring"""
a : Optional[Any] = mean_squared_error(
UpperCAmelCase_ , UpperCAmelCase_ , sample_weight=UpperCAmelCase_ , multioutput=UpperCAmelCase_ , squared=UpperCAmelCase_)
return {"mse": mse}
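# Hand-rolled cross-check (illustrative only; the metric above defers to scikit-learn) that
# reproduces the first docstring example: the mean of the squared errors for [2.5, 0.0, 2, 8]
# against [3, -0.5, 2, 7] is 0.375.
_preds_sketch = [2.5, 0.0, 2.0, 8.0]
_refs_sketch = [3.0, -0.5, 2.0, 7.0]
_mse_by_hand = sum((p - r) ** 2 for p, r in zip(_preds_sketch, _refs_sketch)) / len(_preds_sketch)
assert abs(_mse_by_hand - 0.375) < 1e-12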
| 345 | '''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> Optional[Any]:
"""simple docstring"""
a : Union[str, Any] = SwinConfig()
a : Optional[int] = swin_name.split('_' )
a : Union[str, Any] = name_split[1]
a : Dict = int(name_split[4] )
a : Union[str, Any] = int(name_split[3][-1] )
if model_size == "tiny":
a : Optional[Any] = 96
a : Any = (2, 2, 6, 2)
a : List[str] = (3, 6, 12, 24)
elif model_size == "small":
a : int = 96
a : List[str] = (2, 2, 18, 2)
a : int = (3, 6, 12, 24)
elif model_size == "base":
a : Tuple = 128
a : Optional[int] = (2, 2, 18, 2)
a : List[Any] = (4, 8, 16, 32)
else:
a : Dict = 192
a : str = (2, 2, 18, 2)
a : List[Any] = (6, 12, 24, 48)
if "in22k" in swin_name:
a : Any = 21_841
else:
a : str = 1_000
a : str = 'huggingface/label-files'
a : Optional[Any] = 'imagenet-1k-id2label.json'
a : Dict = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='dataset' ) , 'r' ) )
a : Tuple = {int(snake_case ): v for k, v in idalabel.items()}
a : int = idalabel
a : str = {v: k for k, v in idalabel.items()}
a : Dict = img_size
a : List[Any] = num_classes
a : str = embed_dim
a : Dict = depths
a : Union[str, Any] = num_heads
a : int = window_size
return config
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
if "patch_embed.proj" in name:
a : int = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
a : Tuple = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
a : Optional[int] = 'encoder.' + name
if "attn.proj" in name:
a : List[Any] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
a : Tuple = name.replace('attn' , 'attention.self' )
if "norm1" in name:
a : Optional[int] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
a : Dict = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
a : Union[str, Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
a : Any = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
a : Union[str, Any] = 'layernorm.weight'
if name == "norm.bias":
a : List[str] = 'layernorm.bias'
if "head" in name:
a : Union[str, Any] = name.replace('head' , 'classifier' )
else:
a : List[Any] = 'swin.' + name
return name
def SCREAMING_SNAKE_CASE__ ( orig_state_dict : List[Any] , model : Tuple ) -> List[str]:
    """simple docstring"""
    for key in orig_state_dict.copy().keys():
        a : Any = orig_state_dict.pop(key )
if "mask" in key:
continue
elif "qkv" in key:
a : Optional[Any] = key.split('.' )
a : Dict = int(key_split[1] )
a : Optional[int] = int(key_split[3] )
a : Tuple = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
a : Optional[Any] = val[:dim, :]
a : List[Any] = val[
dim : dim * 2, :
]
a : List[Any] = val[-dim:, :]
else:
a : Dict = val[
:dim
]
a : Union[str, Any] = val[
dim : dim * 2
]
a : Union[str, Any] = val[
-dim:
]
else:
a : Dict = val
return orig_state_dict
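# Self-contained sketch (an illustration, not the conversion itself) of the qkv split performed
# above: timm stores query/key/value as one stacked (3*dim, dim) matrix, which is sliced into
# three (dim, dim) blocks before being copied into separate projection layers.
import torch as _torch_sketch

_dim = 4
_qkv_weight = _torch_sketch.arange(3 * _dim * _dim, dtype=_torch_sketch.float32).reshape(3 * _dim, _dim)
_q_w = _qkv_weight[:_dim, :]
_k_w = _qkv_weight[_dim : _dim * 2, :]
_v_w = _qkv_weight[-_dim:, :]
assert _q_w.shape == _k_w.shape == _v_w.shape == (_dim, _dim)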
def SCREAMING_SNAKE_CASE__ ( swin_name : Optional[int] , pytorch_dump_folder_path : Dict ) -> List[str]:
"""simple docstring"""
a : Any = timm.create_model(snake_case , pretrained=snake_case )
timm_model.eval()
a : str = get_swin_config(snake_case )
a : Optional[int] = SwinForImageClassification(snake_case )
model.eval()
a : Union[str, Any] = convert_state_dict(timm_model.state_dict() , snake_case )
model.load_state_dict(snake_case )
a : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
a : Optional[Any] = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
a : str = Image.open(requests.get(snake_case , stream=snake_case ).raw )
a : Union[str, Any] = image_processor(images=snake_case , return_tensors='pt' )
a : int = timm_model(inputs['pixel_values'] )
a : Optional[int] = model(**snake_case ).logits
assert torch.allclose(snake_case , snake_case , atol=1E-3 )
print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case )
if __name__ == "__main__":
UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
UpperCamelCase : Optional[Any] = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 345 | 1 |
'''simple docstring'''
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
UpperCamelCase : Tuple = None
try:
import msvcrt
except ImportError:
UpperCamelCase : List[Any] = None
try:
import fcntl
except ImportError:
UpperCamelCase : Tuple = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
UpperCamelCase : str = OSError
# Data
# ------------------------------------------------
UpperCamelCase : int = [
"""Timeout""",
"""BaseFileLock""",
"""WindowsFileLock""",
"""UnixFileLock""",
"""SoftFileLock""",
"""FileLock""",
]
UpperCamelCase : Union[str, Any] = """3.0.12"""
UpperCamelCase : Optional[Any] = None
def SCREAMING_SNAKE_CASE__ ( ) -> List[str]:
"""simple docstring"""
global _logger
a : Union[str, Any] = _logger or logging.getLogger(__name__ )
return _logger
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase_ : Tuple):
"""simple docstring"""
a : Optional[Any] = lock_file
return None
def __str__( self : str):
"""simple docstring"""
a : Union[str, Any] = f"""The file lock '{self.lock_file}' could not be acquired."""
return temp
class UpperCamelCase :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : Any):
"""simple docstring"""
a : Union[str, Any] = lock
return None
def __enter__( self : Optional[Any]):
"""simple docstring"""
return self.lock
def __exit__( self : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict):
"""simple docstring"""
self.lock.release()
return None
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[str]=-1 , UpperCAmelCase_ : int=None):
"""simple docstring"""
a : Optional[int] = max_filename_length if max_filename_length is not None else 2_5_5
# Hash the filename if it's too long
a : Optional[int] = self.hash_filename_if_too_long(UpperCAmelCase_ , UpperCAmelCase_)
# The path to the lock file.
a : str = lock_file
# The file descriptor for the *_lock_file* as it is returned by the
# os.open() function.
# This file lock is only NOT None, if the object currently holds the
# lock.
a : List[Any] = None
# The default timeout value.
a : List[Any] = timeout
# We use this lock primarily for the lock counter.
a : Dict = threading.Lock()
# The lock counter is used for implementing the nested locking
# mechanism. Whenever the lock is acquired, the counter is increased and
# the lock is only released, when this value is 0 again.
a : str = 0
return None
@property
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
return self._lock_file
@property
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
return self._timeout
@timeout.setter
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : Tuple):
"""simple docstring"""
a : Optional[Any] = float(UpperCAmelCase_)
return None
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
raise NotImplementedError()
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
raise NotImplementedError()
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
return self._lock_file_fd is not None
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : str=None , UpperCAmelCase_ : int=0.05):
"""simple docstring"""
if timeout is None:
a : List[Any] = self.timeout
# Increment the number right at the beginning.
# We can still undo it, if something fails.
with self._thread_lock:
self._lock_counter += 1
a : List[str] = id(self)
a : Any = self._lock_file
a : List[Any] = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""")
self._acquire()
if self.is_locked:
logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""")
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""")
raise Timeout(self._lock_file)
else:
logger().debug(
f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""")
time.sleep(UpperCAmelCase_)
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
a : int = max(0 , self._lock_counter - 1)
raise
return _Acquire_ReturnProxy(lock=self)
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : List[str]=False):
"""simple docstring"""
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
a : int = id(self)
a : Optional[Any] = self._lock_file
logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""")
self._release()
a : str = 0
logger().debug(f"""Lock {lock_id} released on {lock_filename}""")
return None
def __enter__( self : Dict):
"""simple docstring"""
self.acquire()
return self
def __exit__( self : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Dict):
"""simple docstring"""
self.release()
return None
def __del__( self : Any):
"""simple docstring"""
self.release(force=UpperCAmelCase_)
return None
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : int):
"""simple docstring"""
a : List[str] = os.path.basename(UpperCAmelCase_)
if len(UpperCAmelCase_) > max_length and max_length > 0:
a : int = os.path.dirname(UpperCAmelCase_)
a : str = str(hash(UpperCAmelCase_))
a : Dict = filename[: max_length - len(UpperCAmelCase_) - 8] + '...' + hashed_filename + '.lock'
return os.path.join(UpperCAmelCase_ , UpperCAmelCase_)
else:
return path
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : str=-1 , UpperCAmelCase_ : int=None):
"""simple docstring"""
from .file_utils import relative_to_absolute_path
super().__init__(UpperCAmelCase_ , timeout=UpperCAmelCase_ , max_filename_length=UpperCAmelCase_)
a : Tuple = '\\\\?\\' + relative_to_absolute_path(self.lock_file)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : List[str] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
a : str = os.open(self._lock_file , UpperCAmelCase_)
except OSError:
pass
else:
try:
msvcrt.locking(UpperCAmelCase_ , msvcrt.LK_NBLCK , 1)
except OSError:
os.close(UpperCAmelCase_)
else:
a : Tuple = fd
return None
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : str = self._lock_file_fd
a : Any = None
msvcrt.locking(UpperCAmelCase_ , msvcrt.LK_UNLCK , 1)
os.close(UpperCAmelCase_)
try:
os.remove(self._lock_file)
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict=-1 , UpperCAmelCase_ : str=None):
"""simple docstring"""
a : Any = os.statvfs(os.path.dirname(UpperCAmelCase_)).f_namemax
super().__init__(UpperCAmelCase_ , timeout=UpperCAmelCase_ , max_filename_length=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : List[str] = os.O_RDWR | os.O_CREAT | os.O_TRUNC
a : Tuple = os.open(self._lock_file , UpperCAmelCase_)
try:
fcntl.flock(UpperCAmelCase_ , fcntl.LOCK_EX | fcntl.LOCK_NB)
except OSError:
os.close(UpperCAmelCase_)
else:
a : Tuple = fd
return None
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : Optional[Any] = self._lock_file_fd
a : Any = None
fcntl.flock(UpperCAmelCase_ , fcntl.LOCK_UN)
os.close(UpperCAmelCase_)
return None
class UpperCamelCase ( a_ ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : int = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
a : List[Any] = os.open(self._lock_file , UpperCAmelCase_)
except OSError:
pass
else:
a : Tuple = fd
return None
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
os.close(self._lock_file_fd)
a : List[Any] = None
try:
os.remove(self._lock_file)
# The file is already deleted and that's what we want.
except OSError:
pass
return None
UpperCamelCase : List[str] = None
if msvcrt:
UpperCamelCase : Optional[Any] = WindowsFileLock
elif fcntl:
UpperCamelCase : Optional[Any] = UnixFileLock
else:
UpperCamelCase : Dict = SoftFileLock
if warnings is not None:
warnings.warn("""only soft file lock is available""")
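# Hedged usage sketch: the standalone `filelock` package on PyPI (assumed to be installed; this
# vendored copy mirrors its interface) is typically driven through the context-manager protocol
# implemented above, with `Timeout` raised when another process keeps holding the lock.
#
# from filelock import FileLock, Timeout
#
# lock = FileLock("example.txt.lock", timeout=1)
# try:
#     with lock:
#         pass  # the lock is held for the duration of the with-block and nests via the counter
# except Timeout:
#     pass  # another process held the lock and did not release it within 1 second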
| 345 | '''simple docstring'''
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class UpperCamelCase :
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase_ : Tuple):
"""simple docstring"""
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
a : Dict = deepcopy(UpperCAmelCase_)
elif os.path.exists(UpperCAmelCase_):
with io.open(UpperCAmelCase_ , 'r' , encoding='utf-8') as f:
a : Union[str, Any] = json.load(UpperCAmelCase_)
else:
try:
a : Union[str, Any] = baseaa.urlsafe_baadecode(UpperCAmelCase_).decode('utf-8')
a : List[str] = json.loads(UpperCAmelCase_)
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""")
a : Optional[int] = config
self.set_stage_and_offload()
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : str = self.get_value('zero_optimization.stage' , -1)
# offload
a : Any = False
if self.is_zeroa() or self.is_zeroa():
a : Tuple = set(['cpu', 'nvme'])
a : int = set(
[
self.get_value('zero_optimization.offload_optimizer.device'),
self.get_value('zero_optimization.offload_param.device'),
])
if len(offload_devices & offload_devices_valid) > 0:
a : List[str] = True
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : List[str] = self.config
# find the config node of interest if it exists
a : int = ds_key_long.split('.')
a : Union[str, Any] = nodes.pop()
for node in nodes:
a : Union[str, Any] = config.get(UpperCAmelCase_)
if config is None:
return None, ds_key
return config, ds_key
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int=None):
"""simple docstring"""
        config , ds_key = self.find_config_node(UpperCAmelCase_)
if config is None:
return default
return config.get(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any=False):
"""simple docstring"""
a : Any = self.config
# find the config node of interest if it exists
a : Optional[Any] = ds_key_long.split('.')
for node in nodes:
a : List[str] = config
a : int = config.get(UpperCAmelCase_)
if config is None:
if must_exist:
raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""")
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : str):
"""simple docstring"""
a : List[str] = self.get_value(UpperCAmelCase_)
return False if value is None else bool(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : List[Any] = self.get_value(UpperCAmelCase_)
return False if value is None else not bool(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return self._stage == 2
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return self._stage == 3
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
return self._offload
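# Minimal standalone sketch (my own simplification, not the class above) of the dotted-path
# lookup that the config helpers perform on the nested DeepSpeed configuration dictionary.
def _get_by_dotted_key_sketch(config, ds_key_long, default=None):
    *parents, leaf = ds_key_long.split(".")
    node = config
    for key in parents:
        node = node.get(key)
        if node is None:
            return default
    return node.get(leaf, default)


_sample_ds_config = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
assert _get_by_dotted_key_sketch(_sample_ds_config, "zero_optimization.stage") == 3
assert _get_by_dotted_key_sketch(_sample_ds_config, "zero_optimization.offload_param.device") == "cpu"
assert _get_by_dotted_key_sketch(_sample_ds_config, "optimizer.params.lr", default="auto") == "auto"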
class UpperCamelCase :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : int):
"""simple docstring"""
a : Union[str, Any] = engine
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
self.engine.backward(UpperCAmelCase_ , **UpperCAmelCase_)
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any):
"""simple docstring"""
super().__init__(UpperCAmelCase_ , device_placement=UpperCAmelCase_ , scaler=UpperCAmelCase_)
a : List[str] = hasattr(self.optimizer , 'overflow')
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Dict=None):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
if self.__has_overflow__:
return self.optimizer.overflow
return False
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
super().__init__(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int]=0.0_01 , UpperCAmelCase_ : List[Any]=0 , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : int = params
a : str = lr
a : Tuple = weight_decay
a : Dict = kwargs
class UpperCamelCase :
"""simple docstring"""
def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Union[str, Any]=0 , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : str = optimizer
a : Tuple = total_num_steps
a : Optional[Any] = warmup_num_steps
a : List[str] = kwargs
| 345 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : Any = {
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : str = "trocr"
A : Optional[int] = ["past_key_values"]
A : Optional[Any] = {
"num_attention_heads": "decoder_attention_heads",
"hidden_size": "d_model",
"num_hidden_layers": "decoder_layers",
}
def __init__( self : Dict , UpperCAmelCase_ : List[Any]=5_0_2_6_5 , UpperCAmelCase_ : List[str]=1_0_2_4 , UpperCAmelCase_ : Optional[int]=1_2 , UpperCAmelCase_ : int=1_6 , UpperCAmelCase_ : List[Any]=4_0_9_6 , UpperCAmelCase_ : Dict="gelu" , UpperCAmelCase_ : Dict=5_1_2 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Dict=0.0 , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Any=0.0 , UpperCAmelCase_ : Optional[Any]=True , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Optional[Any]=1 , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : Optional[int]=2 , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
a : List[str] = vocab_size
a : Optional[int] = d_model
a : Any = decoder_layers
a : str = decoder_attention_heads
a : int = decoder_ffn_dim
a : str = activation_function
a : str = max_position_embeddings
a : Dict = dropout
a : Any = attention_dropout
a : Tuple = activation_dropout
a : List[str] = init_std
a : Optional[Any] = decoder_layerdrop
a : Dict = use_cache
a : List[Any] = scale_embedding
a : Dict = use_learned_position_embeddings
a : Dict = layernorm_embedding
super().__init__(
pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , decoder_start_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , )
| 345 | '''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
UpperCamelCase : List[str] = logging.get_logger(__name__)
@dataclass
class UpperCamelCase :
"""simple docstring"""
A : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
A : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
A : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A : bool = field(
default=a_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Union[str, Any] = self.task_name.lower()
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : int = "train"
A : Tuple = "dev"
A : List[Any] = "test"
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : GlueDataTrainingArguments
A : str
A : List[InputFeatures]
def __init__( self : Tuple , UpperCAmelCase_ : GlueDataTrainingArguments , UpperCAmelCase_ : PreTrainedTokenizerBase , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Union[str, Split] = Split.train , UpperCAmelCase_ : Optional[str] = None , ):
"""simple docstring"""
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , UpperCAmelCase_ , )
a : Dict = args
a : int = glue_processors[args.task_name]()
a : int = glue_output_modes[args.task_name]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
try:
a : str = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name')
# Load data features from cache or dataset file
a : List[str] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
a : Tuple = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
a , a : str = label_list[2], label_list[1]
a : int = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
a : Union[str, Any] = cached_features_file + '.lock'
with FileLock(UpperCAmelCase_):
if os.path.exists(UpperCAmelCase_) and not args.overwrite_cache:
a : Optional[Any] = time.time()
a : Optional[Any] = torch.load(UpperCAmelCase_)
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start)
else:
logger.info(f"""Creating features from dataset file at {args.data_dir}""")
if mode == Split.dev:
a : List[Any] = self.processor.get_dev_examples(args.data_dir)
elif mode == Split.test:
a : Optional[Any] = self.processor.get_test_examples(args.data_dir)
else:
a : List[str] = self.processor.get_train_examples(args.data_dir)
if limit_length is not None:
a : Dict = examples[:limit_length]
a : List[Any] = glue_convert_examples_to_features(
UpperCAmelCase_ , UpperCAmelCase_ , max_length=args.max_seq_length , label_list=UpperCAmelCase_ , output_mode=self.output_mode , )
a : Dict = time.time()
torch.save(self.features , UpperCAmelCase_)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""")
def __len__( self : Tuple):
"""simple docstring"""
return len(self.features)
def __getitem__( self : Optional[int] , UpperCAmelCase_ : List[str]):
"""simple docstring"""
return self.features[i]
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return self.label_list
| 345 | 1 |
'''simple docstring'''
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : int):
"""simple docstring"""
a : Dict = n
a : Dict = [None] * self.n
a : int = 0 # index of the first element
a : Optional[int] = 0
a : Optional[Any] = 0
def __len__( self : int):
"""simple docstring"""
return self.size
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return self.size == 0
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
return False if self.is_empty() else self.array[self.front]
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : List[str]):
"""simple docstring"""
if self.size >= self.n:
raise Exception('QUEUE IS FULL')
a : int = data
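# the modulo below wraps the rear index back to 0 once it reaches the end of the array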
a : str = (self.rear + 1) % self.n
self.size += 1
return self
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
if self.size == 0:
raise Exception('UNDERFLOW')
a : Union[str, Any] = self.array[self.front]
a : Optional[Any] = None
a : List[str] = (self.front + 1) % self.n
self.size -= 1
return temp
| 345 | '''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase : Dict = logging.get_logger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Any = ["pixel_values"]
def __init__( self : str , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : str = size if size is not None else {'shortest_edge': 2_5_6}
a : Dict = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
a : int = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Any = do_resize
a : List[str] = size
a : Union[str, Any] = resample
a : int = do_center_crop
a : Optional[int] = crop_size
a : Tuple = do_rescale
a : int = rescale_factor
a : Optional[Any] = do_normalize
a : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
a : Optional[int] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
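# pick an output size whose shortest edge matches size['shortest_edge'] while keeping the aspect ratio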
a : Union[str, Any] = get_resize_output_image_size(UpperCAmelCase_ , size=size['shortest_edge'] , default_to_square=UpperCAmelCase_)
return resize(UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
a : List[str] = get_size_dict(UpperCAmelCase_)
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
return center_crop(UpperCAmelCase_ , size=(size['height'], size['width']) , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : float , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
a : int = do_resize if do_resize is not None else self.do_resize
a : int = size if size is not None else self.size
a : Union[str, Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = resample if resample is not None else self.resample
a : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
a : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
a : Dict = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : str = do_rescale if do_rescale is not None else self.do_rescale
a : int = rescale_factor if rescale_factor is not None else self.rescale_factor
a : str = do_normalize if do_normalize is not None else self.do_normalize
a : List[str] = image_mean if image_mean is not None else self.image_mean
a : Optional[int] = image_std if image_std is not None else self.image_std
a : Dict = make_list_of_images(UpperCAmelCase_)
if not valid_images(UpperCAmelCase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a : List[Any] = [to_numpy_array(UpperCAmelCase_) for image in images]
if do_resize:
a : Dict = [self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_) for image in images]
if do_center_crop:
a : Any = [self.center_crop(image=UpperCAmelCase_ , size=UpperCAmelCase_) for image in images]
if do_rescale:
a : Optional[int] = [self.rescale(image=UpperCAmelCase_ , scale=UpperCAmelCase_) for image in images]
if do_normalize:
a : Dict = [self.normalize(image=UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_) for image in images]
a : List[Any] = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : List[str] = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Tuple] = None):
"""simple docstring"""
a : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase_) != len(UpperCAmelCase_):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(UpperCAmelCase_):
a : Optional[Any] = target_sizes.numpy()
a : List[str] = []
for idx in range(len(UpperCAmelCase_)):
a : Optional[Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=UpperCAmelCase_)
a : Union[str, Any] = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(UpperCAmelCase_)
else:
a : Optional[int] = logits.argmax(dim=1)
a : List[str] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
| 345 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase : Tuple = {"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Any = ["""ViTFeatureExtractor"""]
UpperCamelCase : Any = ["""ViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Any = [
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Tuple = [
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : List[Any] = [
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
UpperCamelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 345 | '''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( snake_case : int | float | str , snake_case : int | float | str ) -> list[str]:
"""simple docstring"""
if nth_term == "":
return [""]
a : Dict = int(snake_case )
a : Optional[int] = int(snake_case )
a : list[str] = []
for temp in range(int(snake_case ) ):
series.append(F"""1 / {pow(temp + 1 , int(snake_case ) )}""" if series else '1' )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase : Optional[int] = int(input("""Enter the last number (nth term) of the P-Series"""))
UpperCamelCase : List[Any] = int(input("""Enter the power for P-Series"""))
print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
print(p_series(nth_term, power))
| 345 | 1 |
'''simple docstring'''
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int , snake_case : Tuple , snake_case : int ) -> Any:
"""simple docstring"""
a : Union[str, Any] = {
'en': 'Machine learning is great, isn\'t it?',
'ru': 'Машинное обучение - это здорово, не так ли?',
'de': 'Maschinelles Lernen ist großartig, nicht wahr?',
}
# BLEU scores as follows:
# "pair": [fairseq, transformers]
a : Optional[Any] = {
'wmt16-en-de-dist-12-1': [28.3, 27.52],
'wmt16-en-de-dist-6-1': [27.4, 27.11],
'wmt16-en-de-12-1': [26.9, 25.75],
}
a : Union[str, Any] = F"""{src_lang}-{tgt_lang}"""
a : str = F"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the one reported in the paper, because the paper's authors don't use `sacrebleu` and measure the score on tokenized outputs. The `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
"""
model_card_dir.mkdir(parents=snake_case , exist_ok=snake_case )
a : List[str] = os.path.join(snake_case , 'README.md' )
print(F"""Generating {path}""" )
with open(snake_case , 'w' , encoding='utf-8' ) as f:
f.write(snake_case )
# make sure we are under the root of the project
UpperCamelCase : Tuple = Path(__file__).resolve().parent.parent.parent
UpperCamelCase : Optional[int] = repo_dir / """model_cards"""
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
UpperCamelCase : Any = model_cards_dir / """allenai""" / model_name
write_model_card(model_card_dir, src_lang="""en""", tgt_lang="""de""", model_name=model_name)
| 345 | '''simple docstring'''
import torch
def SCREAMING_SNAKE_CASE__ ( ) -> str:
"""simple docstring"""
if torch.cuda.is_available():
a : int = torch.cuda.device_count()
else:
a : Any = 0
print(F"""Successfully ran on {num_gpus} GPUs""" )
if __name__ == "__main__":
main()
| 345 | 1 |
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str=0.2 , UpperCAmelCase_ : Optional[int]=0.2):
"""simple docstring"""
a : int = bp_numa
a : int = bp_numa
a : List[Any] = bp_numa
a : int = conva_get[:2]
a : List[Any] = conva_get[2]
a : List[Any] = size_pa
a : Optional[Any] = rate_w
a : Tuple = rate_t
a : List[str] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0]) + 0.5)
for i in range(self.conva[1])
]
a : List[Any] = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
a : str = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa) + 0.5)
a : Dict = -2 * np.random.rand(self.conva[1]) + 1
a : List[str] = -2 * np.random.rand(self.num_bpa) + 1
a : Optional[Any] = -2 * np.random.rand(self.num_bpa) + 1
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : Tuple = {
'num_bp1': self.num_bpa,
'num_bp2': self.num_bpa,
'num_bp3': self.num_bpa,
'conv1': self.conva,
'step_conv1': self.step_conva,
'size_pooling1': self.size_poolinga,
'rate_weight': self.rate_weight,
'rate_thre': self.rate_thre,
'w_conv1': self.w_conva,
'wkj': self.wkj,
'vji': self.vji,
'thre_conv1': self.thre_conva,
'thre_bp2': self.thre_bpa,
'thre_bp3': self.thre_bpa,
}
with open(UpperCAmelCase_ , 'wb') as f:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_)
print(f"""Model saved: {save_path}""")
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : List[str] , UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
with open(UpperCAmelCase_ , 'rb') as f:
a : str = pickle.load(UpperCAmelCase_) # noqa: S301
a : Any = model_dic.get('conv1')
conv_get.append(model_dic.get('step_conv1'))
a : Dict = model_dic.get('size_pooling1')
a : str = model_dic.get('num_bp1')
a : Union[str, Any] = model_dic.get('num_bp2')
a : str = model_dic.get('num_bp3')
a : List[str] = model_dic.get('rate_weight')
a : str = model_dic.get('rate_thre')
# create model instance
a : Optional[Any] = CNN(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
# modify model parameter
a : Union[str, Any] = model_dic.get('w_conv1')
a : int = model_dic.get('wkj')
a : List[Any] = model_dic.get('vji')
a : List[Any] = model_dic.get('thre_conv1')
a : Optional[Any] = model_dic.get('thre_bp2')
a : int = model_dic.get('thre_bp3')
return conv_ins
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : int):
"""simple docstring"""
return 1 / (1 + np.exp(-1 * x))
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : List[str]):
"""simple docstring"""
return round(UpperCAmelCase_ , 3)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : Dict = convs[0]
a : Tuple = convs[1]
a : Any = np.shape(UpperCAmelCase_)[0]
# get the sliding-window slices of the original image data, data_focus
a : str = []
for i_focus in range(0 , size_data - size_conv + 1 , UpperCAmelCase_):
for j_focus in range(0 , size_data - size_conv + 1 , UpperCAmelCase_):
a : Optional[Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(UpperCAmelCase_)
# calculate the feature map of every kernel and save it as a list of matrices
a : Union[str, Any] = []
a : Optional[Any] = int((size_data - size_conv) / conv_step + 1)
for i_map in range(UpperCAmelCase_):
a : Union[str, Any] = []
for i_focus in range(len(UpperCAmelCase_)):
a : List[Any] = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map]))
- thre_convs[i_map]
)
featuremap.append(self.sig(UpperCAmelCase_))
a : str = np.asmatrix(UpperCAmelCase_).reshape(
UpperCAmelCase_ , UpperCAmelCase_)
data_featuremap.append(UpperCAmelCase_)
# expand the data slice to one dimension
a : Optional[int] = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(UpperCAmelCase_))
a : Union[str, Any] = np.asarray(UpperCAmelCase_)
return focus_list, data_featuremap
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple="average_pool"):
"""simple docstring"""
a : Dict = len(featuremaps[0])
a : Union[str, Any] = int(size_map / size_pooling)
a : Tuple = []
for i_map in range(len(UpperCAmelCase_)):
a : Dict = featuremaps[i_map]
a : str = []
for i_focus in range(0 , UpperCAmelCase_ , UpperCAmelCase_):
for j_focus in range(0 , UpperCAmelCase_ , UpperCAmelCase_):
a : Dict = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(UpperCAmelCase_))
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(UpperCAmelCase_))
a : Any = np.asmatrix(UpperCAmelCase_).reshape(UpperCAmelCase_ , UpperCAmelCase_)
featuremap_pooled.append(UpperCAmelCase_)
return featuremap_pooled
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : int = []
for i in range(len(UpperCAmelCase_)):
a : int = np.shape(data[i])
a : int = data[i].reshape(1 , shapes[0] * shapes[1])
a : Optional[Any] = data_listed.getA().tolist()[0]
data_expanded.extend(UpperCAmelCase_)
a : Optional[int] = np.asarray(UpperCAmelCase_)
return data_expanded
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : int):
"""simple docstring"""
a : Any = np.asarray(UpperCAmelCase_)
a : List[Any] = np.shape(UpperCAmelCase_)
a : Optional[int] = data_mat.reshape(1 , shapes[0] * shapes[1])
return data_expanded
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str):
"""simple docstring"""
a : Any = []
a : Optional[int] = 0
for i_map in range(UpperCAmelCase_):
a : Any = np.ones((size_map, size_map))
for i in range(0 , UpperCAmelCase_ , UpperCAmelCase_):
for j in range(0 , UpperCAmelCase_ , UpperCAmelCase_):
a : Dict = pd_pool[
i_pool
]
a : List[str] = i_pool + 1
a : Dict = np.multiply(
UpperCAmelCase_ , np.multiply(out_map[i_map] , (1 - out_map[i_map])))
pd_all.append(UpperCAmelCase_)
return pd_all
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any]=bool):
"""simple docstring"""
print('----------------------Start Training-------------------------')
print((' - - Shape: Train_Data ', np.shape(UpperCAmelCase_)))
print((' - - Shape: Teach_Data ', np.shape(UpperCAmelCase_)))
a : Optional[Any] = 0
a : Dict = []
a : str = 1_0_0_0_0
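# run full passes over the training set until the mean error drops below error_accuracy or n_repeat epochs are reached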
while rp < n_repeat and mse >= error_accuracy:
a : str = 0
print(f"""-------------Learning Time {rp}--------------""")
for p in range(len(UpperCAmelCase_)):
# print('------------Learning Image: %d--------------'%p)
a : str = np.asmatrix(datas_train[p])
a : int = np.asarray(datas_teach[p])
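# forward pass: convolution, pooling, flattening, then two fully connected sigmoid layers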
a , a : Dict = self.convolute(
UpperCAmelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a : Any = self.pooling(UpperCAmelCase_ , self.size_poolinga)
a : Dict = np.shape(UpperCAmelCase_)
a : List[Any] = self._expand(UpperCAmelCase_)
a : Union[str, Any] = data_bp_input
a : List[Any] = np.dot(UpperCAmelCase_ , self.vji.T) - self.thre_bpa
a : List[Any] = self.sig(UpperCAmelCase_)
a : str = np.dot(UpperCAmelCase_ , self.wkj.T) - self.thre_bpa
a : List[str] = self.sig(UpperCAmelCase_)
# --------------Model Learning ------------------------
# calculate error and gradient---------------
a : int = np.multiply(
(data_teach - bp_outa) , np.multiply(UpperCAmelCase_ , (1 - bp_outa)))
a : Dict = np.multiply(
np.dot(UpperCAmelCase_ , self.wkj) , np.multiply(UpperCAmelCase_ , (1 - bp_outa)))
a : List[str] = np.dot(UpperCAmelCase_ , self.vji)
a : Any = pd_i_all / (self.size_poolinga * self.size_poolinga)
a : Any = pd_conva_pooled.T.getA().tolist()
a : Dict = self._calculate_gradient_from_pool(
UpperCAmelCase_ , UpperCAmelCase_ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1]):
a : Tuple = self._expand_mat(pd_conva_all[k_conv])
a : Any = self.rate_weight * np.dot(UpperCAmelCase_ , UpperCAmelCase_)
a : List[str] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]))
a : Tuple = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv]) * self.rate_thre
)
# fully connected layers
a : Tuple = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
a : List[str] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
a : str = self.thre_bpa - pd_k_all * self.rate_thre
a : Optional[Any] = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the summed error for this single image
a : Union[str, Any] = np.sum(abs(data_teach - bp_outa))
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
a : Union[str, Any] = rp + 1
a : Tuple = error_count / patterns
all_mse.append(UpperCAmelCase_)
def draw_error():
a : Tuple = [error_accuracy for i in range(int(n_repeat * 1.2))]
plt.plot(UpperCAmelCase_ , '+-')
plt.plot(UpperCAmelCase_ , 'r--')
plt.xlabel('Learning Times')
plt.ylabel('All_mse')
plt.grid(UpperCAmelCase_ , alpha=0.5)
plt.show()
print('------------------Training Completed---------------------')
print((' - - Training epoch: ', rp, f""" - - Mse: {mse:.6f}"""))
if draw_e:
draw_error()
return mse
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : Dict = []
print('-------------------Start Testing-------------------------')
print((' - - Shape: Test_Data ', np.shape(UpperCAmelCase_)))
for p in range(len(UpperCAmelCase_)):
a : int = np.asmatrix(datas_test[p])
a , a : int = self.convolute(
UpperCAmelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a : Tuple = self.pooling(UpperCAmelCase_ , self.size_poolinga)
a : str = self._expand(UpperCAmelCase_)
a : str = data_bp_input
a : Union[str, Any] = bp_outa * self.vji.T - self.thre_bpa
a : Union[str, Any] = self.sig(UpperCAmelCase_)
a : List[Any] = bp_outa * self.wkj.T - self.thre_bpa
a : Tuple = self.sig(UpperCAmelCase_)
produce_out.extend(bp_outa.getA().tolist())
a : Optional[int] = [list(map(self.do_round , UpperCAmelCase_)) for each in produce_out]
return np.asarray(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : int = np.asmatrix(UpperCAmelCase_)
a , a : int = self.convolute(
UpperCAmelCase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
a : Union[str, Any] = self.pooling(UpperCAmelCase_ , self.size_poolinga)
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 345 | '''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
UpperCamelCase : Optional[int] = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
UpperCamelCase : Optional[Any] = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
UpperCamelCase : str = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each prediction
should be a string with tokens separated by spaces.
references: list of references, one for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large'. Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Tuple="auto" , UpperCAmelCase_ : Any=-1 , UpperCAmelCase_ : Optional[int]=0.9 , UpperCAmelCase_ : Union[str, Any]=5 , UpperCAmelCase_ : int=5_0_0 , UpperCAmelCase_ : int="gpt2-large" , UpperCAmelCase_ : Tuple=-1 , UpperCAmelCase_ : Dict=1_0_2_4 , UpperCAmelCase_ : List[str]=2_5 , UpperCAmelCase_ : int=5 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : str=2_5 , ):
"""simple docstring"""
a : List[str] = compute_mauve(
p_text=UpperCAmelCase_ , q_text=UpperCAmelCase_ , p_features=UpperCAmelCase_ , q_features=UpperCAmelCase_ , p_tokens=UpperCAmelCase_ , q_tokens=UpperCAmelCase_ , num_buckets=UpperCAmelCase_ , pca_max_data=UpperCAmelCase_ , kmeans_explained_var=UpperCAmelCase_ , kmeans_num_redo=UpperCAmelCase_ , kmeans_max_iter=UpperCAmelCase_ , featurize_model_name=UpperCAmelCase_ , device_id=UpperCAmelCase_ , max_text_length=UpperCAmelCase_ , divergence_curve_discretization_size=UpperCAmelCase_ , mauve_scaling_factor=UpperCAmelCase_ , verbose=UpperCAmelCase_ , seed=UpperCAmelCase_ , )
return out
| 345 | 1 |
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Any = (DPMSolverSDEScheduler,)
A : List[Any] = 10
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , **UpperCAmelCase_ : Any):
"""simple docstring"""
a : Tuple = {
'num_train_timesteps': 1_1_0_0,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'noise_sampler_seed': 0,
}
config.update(**UpperCAmelCase_)
return config
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
for beta_start, beta_end in zip([0.0_00_01, 0.00_01, 0.0_01] , [0.00_02, 0.0_02, 0.02]):
self.check_over_configs(beta_start=UpperCAmelCase_ , beta_end=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : str = self.scheduler_classes[0]
a : Optional[Any] = self.get_scheduler_config()
a : str = scheduler_class(**UpperCAmelCase_)
scheduler.set_timesteps(self.num_inference_steps)
a : Union[str, Any] = self.dummy_model()
a : List[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
a : Optional[int] = sample.to(UpperCAmelCase_)
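# denoising loop: scale the model input for the current timestep, run the dummy model, then take one scheduler step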
for i, t in enumerate(scheduler.timesteps):
a : str = scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_)
a : int = model(UpperCAmelCase_ , UpperCAmelCase_)
a : Optional[Any] = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
a : Optional[int] = output.prev_sample
a : List[Any] = torch.sum(torch.abs(UpperCAmelCase_))
a : Any = torch.mean(torch.abs(UpperCAmelCase_))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.47_82_10_44_92_18_75) < 1e-2
assert abs(result_mean.item() - 0.21_78_70_59_64_56_52_77) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_21_11_81_64_06) < 1e-2
assert abs(result_mean.item() - 0.2_23_42_90_68_92_29_96_52) < 1e-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62) < 1e-2
assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : str = self.scheduler_classes[0]
a : int = self.get_scheduler_config(prediction_type='v_prediction')
a : Union[str, Any] = scheduler_class(**UpperCAmelCase_)
scheduler.set_timesteps(self.num_inference_steps)
a : Tuple = self.dummy_model()
a : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
a : List[str] = sample.to(UpperCAmelCase_)
for i, t in enumerate(scheduler.timesteps):
a : Optional[int] = scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_)
a : Optional[Any] = model(UpperCAmelCase_ , UpperCAmelCase_)
a : Union[str, Any] = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
a : List[str] = output.prev_sample
a : int = torch.sum(torch.abs(UpperCAmelCase_))
a : Any = torch.mean(torch.abs(UpperCAmelCase_))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_24.77_14_92_00_43_94_53) < 1e-2
assert abs(result_mean.item() - 0.1_62_26_28_90_14_81_62_84) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_28.1_66_33_60_59_57_03) < 1e-2
assert abs(result_mean.item() - 0.1_66_88_32_60_01_16_72_97) < 1e-3
else:
assert abs(result_sum.item() - 1_19.8_48_75_48_82_81_25) < 1e-2
assert abs(result_mean.item() - 0.15_60_53_06_62_53_66_21) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : List[str] = self.scheduler_classes[0]
a : List[Any] = self.get_scheduler_config()
a : Optional[int] = scheduler_class(**UpperCAmelCase_)
scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase_)
a : str = self.dummy_model()
a : List[str] = self.dummy_sample_deter.to(UpperCAmelCase_) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
a : Union[str, Any] = scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_)
a : List[Any] = model(UpperCAmelCase_ , UpperCAmelCase_)
a : Tuple = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
a : List[str] = output.prev_sample
a : Any = torch.sum(torch.abs(UpperCAmelCase_))
a : List[Any] = torch.mean(torch.abs(UpperCAmelCase_))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.46_95_73_97_46_09_38) < 1e-2
assert abs(result_mean.item() - 0.2_18_05_93_46_07_98_26_35) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_36_37_69_53_12) < 1e-2
assert abs(result_mean.item() - 0.2_23_42_90_83_82_41_57_71) < 1e-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62) < 1e-2
assert abs(result_mean.item() - 0.2_11_61_95_70_85_13_26) < 1e-3
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : int = self.scheduler_classes[0]
a : Any = self.get_scheduler_config()
a : Union[str, Any] = scheduler_class(**UpperCAmelCase_ , use_karras_sigmas=UpperCAmelCase_)
scheduler.set_timesteps(self.num_inference_steps , device=UpperCAmelCase_)
a : List[str] = self.dummy_model()
a : Union[str, Any] = self.dummy_sample_deter.to(UpperCAmelCase_) * scheduler.init_noise_sigma
a : int = sample.to(UpperCAmelCase_)
for t in scheduler.timesteps:
a : Optional[Any] = scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_)
a : Any = model(UpperCAmelCase_ , UpperCAmelCase_)
a : Optional[Any] = scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
a : List[Any] = output.prev_sample
a : Union[str, Any] = torch.sum(torch.abs(UpperCAmelCase_))
a : int = torch.mean(torch.abs(UpperCAmelCase_))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_76.66_97_41_35_74_21_88) < 1e-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_77.63_65_35_64_45_31_25) < 1e-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11) < 1e-2
else:
assert abs(result_sum.item() - 1_70.3_13_52_23_38_86_72) < 1e-2
assert abs(result_mean.item() - 0.2_30_03_87_27_30_98_18_11) < 1e-2
| 345 | '''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( snake_case : list[int | float] , snake_case : int , snake_case : int ) -> int | float:
"""simple docstring"""
if len(snake_case ) == 0:
raise ValueError('find_max() arg is an empty sequence' )
if (
left >= len(snake_case )
or left < -len(snake_case )
or right >= len(snake_case )
or right < -len(snake_case )
):
raise IndexError('list index out of range' )
if left == right:
return nums[left]
a : Union[str, Any] = (left + right) >> 1 # the middle
a : List[str] = find_max(snake_case , snake_case , snake_case ) # find max in range[left, mid]
a : Dict = find_max(snake_case , mid + 1 , snake_case ) # find max in range[mid + 1, right]
return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 345 | 1 |
'''simple docstring'''
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class UpperCamelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
A : Any = BertJapaneseTokenizer
A : List[Any] = False
A : Any = True
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
super().setUp()
a : Any = [
'[UNK]',
'[CLS]',
'[SEP]',
'こんにちは',
'こん',
'にちは',
'ばんは',
'##こん',
'##にちは',
'##ばんは',
'世界',
'##世界',
'、',
'##、',
'。',
'##。',
]
a : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
a : Dict = 'こんにちは、世界。 \nこんばんは、世界。'
a : Any = 'こんにちは 、 世界 。 こんばんは 、 世界 。'
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
a , a : Dict = self.get_input_output_texts(UpperCAmelCase_)
a : str = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_)
a : str = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_)
return text, ids
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Tuple = self.tokenizer_class(self.vocab_file)
a : str = tokenizer.tokenize('こんにちは、世界。\nこんばんは、世界。')
self.assertListEqual(UpperCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : str = self.tokenizer_class(self.vocab_file , word_tokenizer_type='mecab')
self.assertIsNotNone(UpperCAmelCase_)
a : Optional[Any] = 'こんにちは、世界。\nこんばんは、世界。'
a : Any = tokenizer.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
a : Union[str, Any] = os.path.join(self.tmpdirname , 'tokenizer.bin')
with open(UpperCAmelCase_ , 'wb') as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_)
with open(UpperCAmelCase_ , 'rb') as handle:
a : Optional[int] = pickle.load(UpperCAmelCase_)
a : List[str] = tokenizer_new.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : int = MecabTokenizer(mecab_dic='ipadic')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
try:
a : List[Any] = MecabTokenizer(mecab_dic='unidic_lite')
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
try:
a : List[Any] = MecabTokenizer(mecab_dic='unidic')
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Optional[Any] = MecabTokenizer(do_lower_case=UpperCAmelCase_ , mecab_dic='ipadic')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップルストア', 'で', 'iphone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
try:
a : Any = MecabTokenizer(
do_lower_case=UpperCAmelCase_ , normalize_text=UpperCAmelCase_ , mecab_option='-d /usr/local/lib/mecab/dic/jumandic')
except RuntimeError:
# if the dictionary doesn't exist on the system, the code above raises this error.
return
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '\u3000', '。'] , )
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : Optional[int] = MecabTokenizer(normalize_text=UpperCAmelCase_ , mecab_dic='ipadic')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップルストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', ' ', '。'] , )
@require_sudachi
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : Dict = self.tokenizer_class(self.vocab_file , word_tokenizer_type='sudachi')
self.assertIsNotNone(UpperCAmelCase_)
a : List[str] = 'こんにちは、世界。\nこんばんは、世界。'
a : Any = tokenizer.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
a : int = os.path.join(self.tmpdirname , 'tokenizer.bin')
with open(UpperCAmelCase_ , 'wb') as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_)
with open(UpperCAmelCase_ , 'rb') as handle:
a : Optional[int] = pickle.load(UpperCAmelCase_)
a : Tuple = tokenizer_new.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
@require_sudachi
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : Tuple = SudachiTokenizer(sudachi_dict_type='core')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : int = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='A')
self.assertListEqual(tokenizer.tokenize('外国人参政権') , ['外国', '人', '参政', '権'])
@require_sudachi
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : str = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='B')
self.assertListEqual(tokenizer.tokenize('外国人参政権') , ['外国人', '参政権'])
@require_sudachi
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : Dict = SudachiTokenizer(sudachi_dict_type='core' , sudachi_split_mode='C')
self.assertListEqual(tokenizer.tokenize('外国人参政権') , ['外国人参政権'])
@require_sudachi
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : str = SudachiTokenizer(do_lower_case=UpperCAmelCase_ , sudachi_dict_type='core')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , [' ', '\t', 'アップル', 'ストア', 'で', 'iphone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', ' ', '。', ' ', ' '] , )
@require_sudachi
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : Dict = SudachiTokenizer(normalize_text=UpperCAmelCase_ , sudachi_dict_type='core')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , [' ', '\t', 'アップル', 'ストア', 'で', 'iPhone', '8', ' ', 'が', ' ', ' ', '\n ', '発売', 'さ', 'れ', 'た', '\u3000', '。', ' ', ' '] , )
@require_sudachi
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : List[Any] = SudachiTokenizer(trim_whitespace=UpperCAmelCase_ , sudachi_dict_type='core')
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れ', 'た', '。'] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : str = self.tokenizer_class(self.vocab_file , word_tokenizer_type='jumanpp')
self.assertIsNotNone(UpperCAmelCase_)
a : List[Any] = 'こんにちは、世界。\nこんばんは、世界。'
a : int = tokenizer.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , ['こんにちは', '、', '世界', '。', 'こん', '##ばんは', '、', '世界', '。'])
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4])
a : str = os.path.join(self.tmpdirname , 'tokenizer.bin')
with open(UpperCAmelCase_ , 'wb') as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_)
with open(UpperCAmelCase_ , 'rb') as handle:
a : Optional[Any] = pickle.load(UpperCAmelCase_)
a : Union[str, Any] = tokenizer_new.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
@require_jumanpp
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : Dict = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : List[str] = JumanppTokenizer(do_lower_case=UpperCAmelCase_)
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップル', 'ストア', 'で', 'iphone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Any = JumanppTokenizer(normalize_text=UpperCAmelCase_)
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['ア', 'ッ', 'フ', '゚', 'ル', 'ストア', 'で', 'iPhone', '8', '\u3000', 'が', '\u3000', '\u3000', '\u3000', '発売', 'さ', 'れた', '\u3000', '。'] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : List[Any] = JumanppTokenizer(trim_whitespace=UpperCAmelCase_)
self.assertListEqual(
tokenizer.tokenize(' \tアップルストアでiPhone8 が \n 発売された 。 ') , ['アップル', 'ストア', 'で', 'iPhone', '8', 'が', '発売', 'さ', 'れた', '。'] , )
@require_jumanpp
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : str = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize('ありがとうございますm(_ _)m見つけるのが大変です。') , ['ありがとう', 'ございます', 'm(_ _)m', '見つける', 'の', 'が', '大変です', '。'] , )
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Dict = ['[UNK]', '[CLS]', '[SEP]', 'こんにちは', 'こん', 'にちは', 'ばんは', '##こん', '##にちは', '##ばんは']
a : Tuple = {}
for i, token in enumerate(UpperCAmelCase_):
a : List[str] = i
a : int = WordpieceTokenizer(vocab=UpperCAmelCase_ , unk_token='[UNK]')
self.assertListEqual(tokenizer.tokenize('') , [])
self.assertListEqual(tokenizer.tokenize('こんにちは') , ['こんにちは'])
self.assertListEqual(tokenizer.tokenize('こんばんは') , ['こん', '##ばんは'])
self.assertListEqual(tokenizer.tokenize('こんばんは こんばんにちは こんにちは') , ['こん', '##ばんは', '[UNK]', 'こんにちは'])
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Tuple = BertJapaneseTokenizer.from_pretrained('nlp-waseda/roberta-base-japanese-with-auto-jumanpp')
a : Union[str, Any] = tokenizer.subword_tokenizer
a : Any = subword_tokenizer.tokenize('国境 の 長い トンネル を 抜ける と 雪国 であった 。')
self.assertListEqual(UpperCAmelCase_ , ['▁国境', '▁の', '▁長い', '▁トンネル', '▁を', '▁抜ける', '▁と', '▁雪', '国', '▁であった', '▁。'])
a : Any = subword_tokenizer.tokenize('こんばんは こんばん にち は こんにちは')
self.assertListEqual(UpperCAmelCase_ , ['▁こん', 'ばん', 'は', '▁こん', 'ばん', '▁に', 'ち', '▁は', '▁こんにちは'])
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : int = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese')
a : int = tokenizer.encode('ありがとう。' , add_special_tokens=UpperCAmelCase_)
a : Union[str, Any] = tokenizer.encode('どういたしまして。' , add_special_tokens=UpperCAmelCase_)
a : Optional[Any] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_)
a : Dict = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class UpperCamelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
A : Union[str, Any] = BertJapaneseTokenizer
A : Optional[Any] = False
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
super().setUp()
a : int = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
a : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , **UpperCAmelCase_ : str):
"""simple docstring"""
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type='character' , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : Any):
"""simple docstring"""
a : Optional[Any] = 'こんにちは、世界。 \nこんばんは、世界。'
a : Optional[Any] = 'こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。'
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : str = self.tokenizer_class(self.vocab_file , subword_tokenizer_type='character')
a : Optional[int] = tokenizer.tokenize('こんにちは、世界。 \nこんばんは、世界。')
self.assertListEqual(
UpperCAmelCase_ , ['こ', 'ん', 'に', 'ち', 'は', '、', '世', '界', '。', 'こ', 'ん', 'ば', 'ん', 'は', '、', '世', '界', '。'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2])
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Tuple = ['[UNK]', '[CLS]', '[SEP]', 'こ', 'ん', 'に', 'ち', 'は', 'ば', '世', '界', '、', '。']
a : Dict = {}
for i, token in enumerate(UpperCAmelCase_):
a : Optional[int] = i
a : List[Any] = CharacterTokenizer(vocab=UpperCAmelCase_ , unk_token='[UNK]')
self.assertListEqual(tokenizer.tokenize('') , [])
self.assertListEqual(tokenizer.tokenize('こんにちは') , ['こ', 'ん', 'に', 'ち', 'は'])
self.assertListEqual(tokenizer.tokenize('こんにちほ') , ['こ', 'ん', 'に', 'ち', '[UNK]'])
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : Tuple = self.tokenizer_class.from_pretrained('cl-tohoku/bert-base-japanese-char')
a : int = tokenizer.encode('ありがとう。' , add_special_tokens=UpperCAmelCase_)
a : Optional[Any] = tokenizer.encode('どういたしまして。' , add_special_tokens=UpperCAmelCase_)
a : str = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_)
a : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_)
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Union[str, Any] = 'cl-tohoku/bert-base-japanese'
a : Tuple = AutoTokenizer.from_pretrained(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Dict = 'cl-tohoku/bert-base-japanese'
with self.assertLogs('transformers' , level='WARNING') as cm:
BertTokenizer.from_pretrained(UpperCAmelCase_)
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.'))
a : Dict = 'bert-base-cased'
with self.assertLogs('transformers' , level='WARNING') as cm:
BertJapaneseTokenizer.from_pretrained(UpperCAmelCase_)
self.assertTrue(
cm.records[0].message.startswith(
'The tokenizer class you load from this checkpoint is not the same type as the class this function'
' is called from.'))
| 345 | '''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCamelCase : int = """true"""
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : int=82 , snake_case : Tuple=16 ) -> Union[str, Any]:
"""simple docstring"""
set_seed(42 )
a : List[str] = RegressionModel()
a : Union[str, Any] = deepcopy(snake_case )
a : Dict = RegressionDataset(length=snake_case )
a : Dict = DataLoader(snake_case , batch_size=snake_case )
model.to(accelerator.device )
a , a : Optional[int] = accelerator.prepare(snake_case , snake_case )
return model, ddp_model, dataloader
def SCREAMING_SNAKE_CASE__ ( snake_case : Accelerator , snake_case : Union[str, Any]=False ) -> Optional[int]:
"""simple docstring"""
a : List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
a : Any = load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(snake_case : int ):
a : Any = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case , max_length=snake_case )
return outputs
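    # Tokenize on the main process first so the cached dataset is reused by the other processes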
with accelerator.main_process_first():
a : Dict = dataset.map(
snake_case , batched=snake_case , remove_columns=['idx', 'sentence1', 'sentence2'] , )
a : List[str] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(snake_case : Optional[Any] ):
if use_longest:
return tokenizer.pad(snake_case , padding='longest' , return_tensors='pt' )
return tokenizer.pad(snake_case , padding='max_length' , max_length=128 , return_tensors='pt' )
return DataLoader(snake_case , shuffle=snake_case , collate_fn=snake_case , batch_size=16 )
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
a : int = Accelerator(dispatch_batches=snake_case , split_batches=snake_case )
a : List[str] = get_dataloader(snake_case , not dispatch_batches )
a : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=snake_case )
a , a : Optional[Any] = accelerator.prepare(snake_case , snake_case )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
a : Dict = []
for batch in dataloader:
a , a : Any = batch.values()
with torch.no_grad():
a : Tuple = model(snake_case )
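        # Gather logits and targets from every process; gather_for_metrics drops the samples duplicated to pad the last batch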
a , a : Dict = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
a , a : List[str] = [], []
for logit, targ in logits_and_targets:
logits.append(snake_case )
targs.append(snake_case )
a , a : Any = torch.cat(snake_case ), torch.cat(snake_case )
return logits, targs
def SCREAMING_SNAKE_CASE__ ( snake_case : Accelerator , snake_case : Dict=82 , snake_case : str=False , snake_case : List[str]=False , snake_case : List[Any]=16 ) -> Optional[int]:
"""simple docstring"""
a , a , a : int = get_basic_setup(snake_case , snake_case , snake_case )
a , a : int = generate_predictions(snake_case , snake_case , snake_case )
assert (
len(snake_case ) == num_samples
), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(snake_case )}"""
def SCREAMING_SNAKE_CASE__ ( snake_case : bool = False , snake_case : bool = False ) -> List[str]:
"""simple docstring"""
a : int = evaluate.load('glue' , 'mrpc' )
a , a : Tuple = get_mrpc_setup(snake_case , snake_case )
# First do baseline
a , a , a : Tuple = setup['no']
model.to(snake_case )
model.eval()
for batch in dataloader:
batch.to(snake_case )
with torch.inference_mode():
a : List[Any] = model(**snake_case )
a : Optional[Any] = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=snake_case , references=batch['labels'] )
a : Tuple = metric.compute()
# Then do distributed
a , a , a : Tuple = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
a : List[str] = model(**snake_case )
a : Optional[Any] = outputs.logits.argmax(dim=-1 )
a : Optional[int] = batch['labels']
a , a : Optional[int] = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=snake_case , references=snake_case )
a : str = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def SCREAMING_SNAKE_CASE__ ( ) -> str:
"""simple docstring"""
a : Dict = Accelerator(split_batches=snake_case , dispatch_batches=snake_case )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be run on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(snake_case , snake_case )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
a : List[Any] = Accelerator(split_batches=snake_case , dispatch_batches=snake_case )
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(snake_case , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
a : Optional[Any] = Accelerator()
test_torch_metrics(snake_case , 512 )
accelerator.state._reset_state()
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] ) -> int:
"""simple docstring"""
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 345 | 1 |
'''simple docstring'''
from collections import defaultdict
def check_anagrams(first_str: str, second_str: str) -> bool:
    """simple docstring"""
    first_str = first_str.lower().strip()
    second_str = second_str.lower().strip()
    # Remove whitespace
    first_str = first_str.replace(' ', '')
    second_str = second_str.replace(' ', '')
    # Strings of different lengths are not anagrams
    if len(first_str) != len(second_str):
        return False
    # Default values for count should be 0
    count: defaultdict[str, int] = defaultdict(int)
    # For each position, increment the count for the first string's character
    # and decrement it for the second's; anagrams cancel out to all zeros
    for i in range(len(first_str)):
        count[first_str[i]] += 1
        count[second_str[i]] -= 1
    return all(_count == 0 for _count in count.values())
if __name__ == "__main__":
from doctest import testmod
testmod()
    input_a = input("""Enter the first string """).strip()
    input_b = input("""Enter the second string """).strip()
    status = check_anagrams(input_a, input_b)
    print(f'''{input_a} and {input_b} are {"" if status else "not "}anagrams.''')
| 345 | '''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Optional[int] = ["vqvae"]
def __init__( self : List[str] , UpperCAmelCase_ : AutoencoderKL , UpperCAmelCase_ : UNetaDConditionModel , UpperCAmelCase_ : Mel , UpperCAmelCase_ : Union[DDIMScheduler, DDPMScheduler] , ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , mel=UpperCAmelCase_ , vqvae=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
return 5_0 if isinstance(self.scheduler , UpperCAmelCase_) else 1_0_0_0
@torch.no_grad()
def __call__( self : Dict , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : str = None , UpperCAmelCase_ : np.ndarray = None , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : int = None , UpperCAmelCase_ : torch.Generator = None , UpperCAmelCase_ : float = 0 , UpperCAmelCase_ : float = 0 , UpperCAmelCase_ : torch.Generator = None , UpperCAmelCase_ : float = 0 , UpperCAmelCase_ : torch.Tensor = None , UpperCAmelCase_ : torch.Tensor = None , UpperCAmelCase_ : Optional[Any]=True , ):
"""simple docstring"""
a : Optional[Any] = steps or self.get_default_steps()
self.scheduler.set_timesteps(UpperCAmelCase_)
a : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size) == int:
a : Dict = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
a : Dict = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=UpperCAmelCase_ , device=self.device , )
a : Tuple = noise
a : Optional[int] = None
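        # If an input audio clip is given, turn it into a mel spectrogram image so generation can be conditioned on it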
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(UpperCAmelCase_ , UpperCAmelCase_)
a : List[Any] = self.mel.audio_slice_to_image(UpperCAmelCase_)
a : str = np.frombuffer(input_image.tobytes() , dtype='uint8').reshape(
(input_image.height, input_image.width))
a : List[str] = (input_image / 2_5_5) * 2 - 1
a : Any = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float).to(self.device)
if self.vqvae is not None:
a : List[Any] = self.vqvae.encode(torch.unsqueeze(UpperCAmelCase_ , 0)).latent_dist.sample(
generator=UpperCAmelCase_)[0]
a : str = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
a : Union[str, Any] = self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , self.scheduler.timesteps[start_step - 1])
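            # Convert the mask window from seconds to spectrogram pixels so part of the original audio can be preserved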
a : Dict = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
a : List[Any] = int(mask_start_secs * pixels_per_second)
a : Optional[Any] = int(mask_end_secs * pixels_per_second)
a : Optional[int] = self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , torch.tensor(self.scheduler.timesteps[start_step:]))
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
if isinstance(self.unet , UpperCAmelCase_):
a : Dict = self.unet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)['sample']
else:
a : str = self.unet(UpperCAmelCase_ , UpperCAmelCase_)['sample']
if isinstance(self.scheduler , UpperCAmelCase_):
a : List[Any] = self.scheduler.step(
model_output=UpperCAmelCase_ , timestep=UpperCAmelCase_ , sample=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , )['prev_sample']
else:
a : Any = self.scheduler.step(
model_output=UpperCAmelCase_ , timestep=UpperCAmelCase_ , sample=UpperCAmelCase_ , generator=UpperCAmelCase_ , )['prev_sample']
if mask is not None:
if mask_start > 0:
a : str = mask[:, step, :, :mask_start]
if mask_end > 0:
a : Dict = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
            # 0.18215 was the scaling factor used during training to ensure unit variance
a : List[str] = 1 / self.vqvae.config.scaling_factor * images
a : str = self.vqvae.decode(UpperCAmelCase_)['sample']
a : Tuple = (images / 2 + 0.5).clamp(0 , 1)
a : Any = images.cpu().permute(0 , 2 , 3 , 1).numpy()
a : List[str] = (images * 2_5_5).round().astype('uint8')
a : Tuple = list(
(Image.fromarray(_[:, :, 0]) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(UpperCAmelCase_ , mode='RGB').convert('L') for _ in images))
a : List[str] = [self.mel.image_to_audio(UpperCAmelCase_) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(UpperCAmelCase_)[:, np.newaxis, :]) , **ImagePipelineOutput(UpperCAmelCase_))
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : List[Image.Image] , UpperCAmelCase_ : int = 5_0):
"""simple docstring"""
assert isinstance(self.scheduler , UpperCAmelCase_)
self.scheduler.set_timesteps(UpperCAmelCase_)
a : Dict = np.array(
[np.frombuffer(image.tobytes() , dtype='uint8').reshape((1, image.height, image.width)) for image in images])
a : Tuple = (sample / 2_5_5) * 2 - 1
a : int = torch.Tensor(UpperCAmelCase_).to(self.device)
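        # Deterministic DDIM inversion: iterate the timesteps in reverse to recover the noise that reproduces these images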
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,))):
a : Optional[Any] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
a : Optional[Any] = self.scheduler.alphas_cumprod[t]
a : List[Any] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
a : List[str] = 1 - alpha_prod_t
a : Optional[Any] = self.unet(UpperCAmelCase_ , UpperCAmelCase_)['sample']
a : Union[str, Any] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
a : Dict = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
a : Union[str, Any] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : torch.Tensor , UpperCAmelCase_ : torch.Tensor , UpperCAmelCase_ : float):
"""simple docstring"""
a : List[Any] = acos(torch.dot(torch.flatten(UpperCAmelCase_) , torch.flatten(UpperCAmelCase_)) / torch.norm(UpperCAmelCase_) / torch.norm(UpperCAmelCase_))
return sin((1 - alpha) * theta) * xa / sin(UpperCAmelCase_) + sin(alpha * theta) * xa / sin(UpperCAmelCase_)
| 345 | 1 |
'''simple docstring'''
def prime_sieve_eratosthenes(num: int) -> list[int]:
    """simple docstring"""
    if num <= 0:
        raise ValueError('Input must be a positive integer')
    # primes[i] stays True while i is still considered prime
    primes = [True] * (num + 1)
    p = 2
    while p * p <= num:
        if primes[p]:
            # Mark every multiple of p, starting at p*p, as composite
            for i in range(p * p, num + 1, p):
                primes[i] = False
        p += 1
    return [prime for prime in range(2, num + 1) if primes[prime]]
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_num = int(input("""Enter a positive integer: """).strip())
    print(prime_sieve_eratosthenes(user_num))
| 345 | '''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]=1_3 , UpperCAmelCase_ : List[str]=3_0 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Union[str, Any]=3_2 , UpperCAmelCase_ : Union[str, Any]=5 , UpperCAmelCase_ : Tuple=4 , UpperCAmelCase_ : Dict=3_7 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Dict=1_0 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Tuple=2 , ):
"""simple docstring"""
a : Any = parent
a : Optional[int] = batch_size
a : str = image_size
a : str = patch_size
a : List[Any] = num_channels
a : Optional[int] = is_training
a : Dict = use_labels
a : Any = hidden_size
a : Optional[int] = num_hidden_layers
a : int = num_attention_heads
a : int = intermediate_size
a : Any = hidden_act
a : Optional[int] = hidden_dropout_prob
a : Optional[int] = attention_probs_dropout_prob
a : Dict = type_sequence_label_size
a : Tuple = initializer_range
a : List[str] = scope
a : str = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
a : Optional[Any] = (image_size // patch_size) ** 2
a : str = num_patches + 1
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a : List[Any] = None
if self.use_labels:
a : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : List[str] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : int = ViTModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : str = ViTForMaskedImageModeling(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
a : int = 1
a : Union[str, Any] = ViTForMaskedImageModeling(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : Optional[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any):
"""simple docstring"""
a : str = self.type_sequence_label_size
a : Tuple = ViTForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
a : List[Any] = 1
a : Union[str, Any] = ViTForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : Optional[int] = model(UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.prepare_config_and_inputs()
        a , a , a : Tuple = config_and_inputs
a : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : str = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
A : Optional[Any] = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
A : List[str] = True
A : Optional[int] = False
A : Dict = False
A : Optional[int] = False
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : str = ViTModelTester(self)
a : Optional[Any] = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=3_7)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds')
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a , a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Union[str, Any] = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
a : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a , a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Union[str, Any] = model_class(UpperCAmelCase_)
a : Tuple = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : Dict = [*signature.parameters.keys()]
a : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Dict = ViTModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
"""simple docstring"""
a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Optional[Any] = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(UpperCAmelCase_)
a : List[Any] = self.default_image_processor
a : List[str] = prepare_img()
a : Tuple = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
a : List[Any] = model(**UpperCAmelCase_)
# verify the logits
a : List[str] = torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
a : Union[str, Any] = torch.tensor([-0.27_44, 0.82_15, -0.08_36]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : List[str] = ViTModel.from_pretrained('facebook/dino-vits8').to(UpperCAmelCase_)
a : Any = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=4_8_0)
a : int = prepare_img()
a : List[str] = image_processor(images=UpperCAmelCase_ , return_tensors='pt')
a : List[str] = inputs.pixel_values.to(UpperCAmelCase_)
# forward pass
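        # interpolate_pos_encoding resizes the position embeddings so the 224px-pretrained backbone accepts the larger 480px inputs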
with torch.no_grad():
a : List[Any] = model(UpperCAmelCase_ , interpolate_pos_encoding=UpperCAmelCase_)
# verify the logits
a : Dict = torch.Size((1, 3_6_0_1, 3_8_4))
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase_)
a : str = torch.tensor(
[[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : str = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto')
a : List[Any] = self.default_image_processor
a : List[str] = prepare_img()
a : Tuple = image_processor(images=UpperCAmelCase_ , return_tensors='pt')
a : Tuple = inputs.pixel_values.to(UpperCAmelCase_)
# forward pass to make sure inference works in fp16
with torch.no_grad():
a : Tuple = model(UpperCAmelCase_)
| 345 | 1 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]=1_3 , UpperCAmelCase_ : Tuple=3_2 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : str=1_6 , UpperCAmelCase_ : int=[1, 2, 1] , UpperCAmelCase_ : int=[2, 2, 4] , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : Any=2.0 , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : List[Any]=0.0 , UpperCAmelCase_ : str=0.0 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : int=True , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : int=1e-5 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : int=1_0 , UpperCAmelCase_ : Any=8 , ):
"""simple docstring"""
a : str = parent
a : Optional[int] = batch_size
a : Dict = image_size
a : List[Any] = patch_size
a : int = num_channels
a : Any = embed_dim
a : Any = depths
a : List[Any] = num_heads
a : Dict = window_size
a : List[str] = mlp_ratio
a : Optional[Any] = qkv_bias
a : Optional[int] = hidden_dropout_prob
a : Optional[int] = attention_probs_dropout_prob
a : List[Any] = drop_path_rate
a : Optional[Any] = hidden_act
a : Optional[int] = use_absolute_embeddings
a : List[str] = patch_norm
a : Union[str, Any] = layer_norm_eps
a : str = initializer_range
a : Optional[Any] = is_training
a : str = scope
a : int = use_labels
a : str = type_sequence_label_size
a : Dict = encoder_stride
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a : str = None
if self.use_labels:
a : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : Tuple = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any):
"""simple docstring"""
a : List[str] = SwinvaModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[Any] = model(UpperCAmelCase_)
a : Optional[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
a : Optional[Any] = int(config.embed_dim * 2 ** (len(config.depths) - 1))
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim))
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : List[Any] = SwinvaForMaskedImageModeling(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Dict = model(UpperCAmelCase_)
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
a : Optional[int] = 1
a : List[str] = SwinvaForMaskedImageModeling(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : List[str] = model(UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size))
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any):
"""simple docstring"""
a : List[str] = self.type_sequence_label_size
a : Optional[Any] = SwinvaForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Optional[int] = self.prepare_config_and_inputs()
a , a , a : Optional[int] = config_and_inputs
a : int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : Optional[int] = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
A : Tuple = (
{"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification}
if is_torch_available()
else {}
)
A : Tuple = False
A : Union[str, Any] = False
A : Optional[Any] = False
A : Optional[Any] = False
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : List[str] = SwinvaModelTester(self)
a : Optional[int] = ConfigTester(self , config_class=UpperCAmelCase_ , embed_dim=3_7)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.')
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds')
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a , a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : List[Any] = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
a : int = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a , a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : List[str] = model_class(UpperCAmelCase_)
a : Tuple = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : str = [*signature.parameters.keys()]
a : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a , a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
a : Any = True
for model_class in self.all_model_classes:
a : Union[str, Any] = True
a : List[Any] = False
a : int = True
a : Tuple = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
a : List[Any] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
a : Tuple = outputs.attentions
a : Union[str, Any] = len(self.model_tester.depths)
self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a : str = True
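            # Swin attention is computed within local windows, so each attention map is window_size**2 x window_size**2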
a : Optional[int] = config.window_size**2
a : Dict = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
a : Union[str, Any] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
a : Optional[Any] = outputs.attentions
self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_)
self.assertListEqual(
list(attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
a : Union[str, Any] = len(UpperCAmelCase_)
# Check attention is always last and order is fine
a : Tuple = True
a : Optional[int] = True
a : List[str] = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
a : Any = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
if hasattr(self.model_tester , 'num_hidden_states_types'):
a : Optional[int] = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
a : Optional[int] = 2
self.assertEqual(out_len + added_hidden_states , len(UpperCAmelCase_))
a : Union[str, Any] = outputs.attentions
self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_)
self.assertListEqual(
list(self_attentions[0].shape[-3:]) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : int):
"""simple docstring"""
a : Tuple = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
with torch.no_grad():
a : Optional[Any] = model(**self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_))
a : Union[str, Any] = outputs.hidden_states
a : Union[str, Any] = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths) + 1)
self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_)
# Swinv2 has a different seq_length
a : List[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
a : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
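        # reshaped_hidden_states have shape (batch, channels, height, width); flatten back to (batch, num_patches, channels) for comparison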
a : Any = outputs.reshaped_hidden_states
self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_)
a , a , a , a : List[Any] = reshaped_hidden_states[0].shape
a : int = (
reshaped_hidden_states[0].view(UpperCAmelCase_ , UpperCAmelCase_ , height * width).permute(0 , 2 , 1)
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , )
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a , a : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
a : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
a : str = True
self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a : Optional[Any] = True
self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a , a : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
a : Dict = 3
a : Tuple = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable)
else (self.model_tester.image_size, self.model_tester.image_size)
)
a : Union[str, Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable)
else (config.patch_size, config.patch_size)
)
a : List[str] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
a : Union[str, Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
a : int = True
self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , (padded_height, padded_width))
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a : Union[str, Any] = True
self.check_hidden_states_output(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , (padded_height, padded_width))
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Any = SwinvaModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a , a : int = self.model_tester.prepare_config_and_inputs_for_common()
a : Dict = _config_zero_init(UpperCAmelCase_)
for model_class in self.all_model_classes:
a : int = model_class(config=UpperCAmelCase_)
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256')
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Any = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256').to(
UpperCAmelCase_)
a : Dict = self.default_image_processor
a : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
a : List[Any] = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
a : Any = model(**UpperCAmelCase_)
# verify the logits
a : Any = torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
a : Union[str, Any] = torch.tensor([-0.39_47, -0.43_06, 0.00_26]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
| 345 | '''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : List[str] = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
A : List[Any] = "CIDAS/clipseg-rd64-refined"
A : Optional[Any] = "image_segmenter"
A : List[Any] = CLIPSegForImageSegmentation
A : Tuple = ["image", "text"]
A : Optional[int] = ["image"]
def __init__( self : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(self , ['vision'])
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : "Image" , UpperCAmelCase_ : str):
"""simple docstring"""
return self.pre_processor(text=[label] , images=[image] , padding=UpperCAmelCase_ , return_tensors='pt')
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : str):
"""simple docstring"""
with torch.no_grad():
a : Union[str, Any] = self.model(**UpperCAmelCase_).logits
return logits
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : int):
"""simple docstring"""
a : int = outputs.cpu().detach().numpy()
        # Threshold the logits into a binary mask: non-positive values -> 0, positive values -> 1
        a[a <= 0] = 0
        a[a > 0] = 1
        return Image.fromarray((a * 2_5_5).astype(np.uinta))
| 345 | 1 |
'''simple docstring'''
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    """simple docstring"""
    # Swap two randomly chosen indices once for every element in the list
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["""python""", """says""", """hello""", """!"""]
    print("""Fisher-Yates Shuffle:""")
    print("""List""", integers, strings)
    print("""FY Shuffle""", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 345 | '''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
A : Union[str, Any] = CTRLTokenizer
A : List[Any] = False
A : Optional[Any] = False
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a : Dict = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
a : Tuple = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
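        # Minimal BPE merge table; the '@@' suffix marks subword pieces that are continued by the next token, '</w>' marks end of word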
a : Any = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
a : List[Any] = {'unk_token': '<unk>'}
a : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(UpperCAmelCase_) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , **UpperCAmelCase_ : Dict):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return CTRLTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Any):
"""simple docstring"""
a : List[str] = 'adapt react readapt apt'
a : int = 'adapt react readapt apt'
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : int = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
a : str = 'adapt react readapt apt'
a : Optional[Any] = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
a : List[Any] = tokenizer.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
a : Union[str, Any] = tokens + [tokenizer.unk_token]
a : Any = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , UpperCAmelCase_)
| 345 | 1 |
'''simple docstring'''
def topological_sort(graph: dict) -> None:
    """simple docstring"""
    # Kahn's algorithm: repeatedly emit vertices whose in-degree has dropped to zero
    indegree = [0] * len(graph)
    queue = []
    topo = []
    cnt = 0
    for values in graph.values():
        for i in values:
            indegree[i] += 1
    for i in range(len(graph)):
        if indegree[i] == 0:
            queue.append(i)
    while queue:
        vertex = queue.pop(0)
        cnt += 1
        topo.append(vertex)
        for x in graph[vertex]:
            indegree[x] -= 1
            if indegree[x] == 0:
                queue.append(x)
    if cnt != len(graph):
        print('Cycle exists')
    else:
        print(topo)
# Adjacency List of Graph
graph = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
| 345 | '''simple docstring'''
def different_signs(num_a: int, num_b: int) -> bool:
    """simple docstring"""
    # XOR of two ints is negative exactly when their sign bits differ
    return num_a ^ num_b < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 345 | 1 |
'''simple docstring'''
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip
UpperCamelCase : List[str] = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple ) -> Tuple:
"""simple docstring"""
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def SCREAMING_SNAKE_CASE__ ( snake_case : Dict , snake_case : Optional[Any] , snake_case : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return max(metric_fn(snake_case , snake_case ) for gt in ground_truths )
def SCREAMING_SNAKE_CASE__ ( snake_case : Any , snake_case : Union[str, Any] , snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
a : Optional[Any] = [line.strip() for line in open(snake_case , 'r' ).readlines()]
a : List[str] = []
if args.gold_data_mode == "qa":
a : Optional[int] = pd.read_csv(snake_case , sep='\t' , header=snake_case )
for answer_list in data[1]:
a : str = ast.literal_eval(snake_case )
answers.append(snake_case )
else:
a : List[Any] = [line.strip() for line in open(snake_case , 'r' ).readlines()]
a : Optional[int] = [[reference] for reference in references]
a : Union[str, Any] = 0
for prediction, ground_truths in zip(snake_case , snake_case ):
total += 1
em += metric_max_over_ground_truths(snake_case , snake_case , snake_case )
fa += metric_max_over_ground_truths(snake_case , snake_case , snake_case )
a : Optional[int] = 1_00.0 * em / total
a : Union[str, Any] = 1_00.0 * fa / total
logger.info(F"""F1: {fa:.2f}""" )
logger.info(F"""EM: {em:.2f}""" )
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : Dict ) -> Dict:
"""simple docstring"""
a : List[str] = args.k
a : Optional[Any] = [line.strip() for line in open(snake_case , 'r' ).readlines()]
a : int = [line.strip() for line in open(snake_case , 'r' ).readlines()]
a : str = 0
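    # Precision@k: average fraction of the top-k retrieved titles that appear in the gold provenance list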
for hypo, reference in zip(snake_case , snake_case ):
a : Optional[int] = set(hypo.split('\t' )[:k] )
a : int = set(reference.split('\t' ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
a : Any = 1_00.0 * em / total
logger.info(F"""Precision@{k}: {em: .2f}""" )
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Optional[Any] , snake_case : str ) -> Optional[Any]:
"""simple docstring"""
def strip_title(snake_case : List[Any] ):
if title.startswith('"' ):
a : List[str] = title[1:]
if title.endswith('"' ):
a : Any = title[:-1]
return title
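    # Encode the questions, query the retriever, and collect the retrieved article titles for provenance evaluation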
a : Any = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
snake_case , return_tensors='pt' , padding=snake_case , truncation=snake_case , )['input_ids'].to(args.device )
a : List[str] = rag_model.rag.question_encoder(snake_case )
a : int = question_enc_outputs[0]
a : Dict = rag_model.retriever(
snake_case , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors='pt' , )
a : int = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
a : List[str] = []
for docs in all_docs:
a : Tuple = [strip_title(snake_case ) for title in docs['title']]
provenance_strings.append('\t'.join(snake_case ) )
return provenance_strings
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : List[str] , snake_case : List[Any] ) -> Union[str, Any]:
"""simple docstring"""
with torch.no_grad():
a : Any = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
snake_case , return_tensors='pt' , padding=snake_case , truncation=snake_case )
a : Dict = inputs_dict.input_ids.to(args.device )
a : int = inputs_dict.attention_mask.to(args.device )
a : Any = rag_model.generate( # rag_model overwrites generate
snake_case , attention_mask=snake_case , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=snake_case , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
a : List[Any] = rag_model.retriever.generator_tokenizer.batch_decode(snake_case , skip_special_tokens=snake_case )
if args.print_predictions:
for q, a in zip(snake_case , snake_case ):
logger.info('Q: {} - A: {}'.format(snake_case , snake_case ) )
return answers
def SCREAMING_SNAKE_CASE__ ( ) -> int:
"""simple docstring"""
a : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'--model_type' , choices=['rag_sequence', 'rag_token', 'bart'] , type=snake_case , help=(
'RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the'
' model_name_or_path'
) , )
parser.add_argument(
'--index_name' , default=snake_case , choices=['exact', 'compressed', 'legacy'] , type=snake_case , help='RAG model retriever type' , )
parser.add_argument(
'--index_path' , default=snake_case , type=snake_case , help='Path to the retrieval index' , )
parser.add_argument('--n_docs' , default=5 , type=snake_case , help='Number of retrieved docs' )
parser.add_argument(
'--model_name_or_path' , default=snake_case , type=snake_case , required=snake_case , help='Path to pretrained checkpoints or model identifier from huggingface.co/models' , )
parser.add_argument(
'--eval_mode' , choices=['e2e', 'retrieval'] , default='e2e' , type=snake_case , help=(
'Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates'
' precision@k.'
) , )
parser.add_argument('--k' , default=1 , type=snake_case , help='k for the precision@k calculation' )
parser.add_argument(
'--evaluation_set' , default=snake_case , type=snake_case , required=snake_case , help='Path to a file containing evaluation samples' , )
parser.add_argument(
'--gold_data_path' , default=snake_case , type=snake_case , required=snake_case , help='Path to a tab-separated file with gold samples' , )
parser.add_argument(
'--gold_data_mode' , default='qa' , type=snake_case , choices=['qa', 'ans'] , help=(
'Format of the gold data file. '
'qa - a single line in the following format: question [tab] answer_list. '
'ans - a single line of the gold file contains the expected answer string.'
) , )
parser.add_argument(
'--predictions_path' , type=snake_case , default='predictions.txt' , help='Name of the predictions file, to be stored in the checkpoints directory' , )
parser.add_argument(
'--eval_all_checkpoints' , action='store_true' , help='Evaluate all checkpoints starting with the same prefix as model_name and ending with the step number' , )
parser.add_argument(
'--eval_batch_size' , default=8 , type=snake_case , help='Batch size per GPU/CPU for evaluation.' , )
parser.add_argument(
'--recalculate' , help='Recalculate predictions even if the prediction file exists' , action='store_true' , )
parser.add_argument(
'--num_beams' , default=4 , type=snake_case , help='Number of beams to be used when generating answers' , )
parser.add_argument('--min_length' , default=1 , type=snake_case , help='Min length of the generated answers' )
parser.add_argument('--max_length' , default=50 , type=snake_case , help='Max length of the generated answers' )
parser.add_argument(
'--print_predictions' , action='store_true' , help='If True, prints predictions while evaluating.' , )
parser.add_argument(
'--print_docs' , action='store_true' , help='If True, prints docs retrieved while generating.' , )
a : Union[str, Any] = parser.parse_args()
a : Any = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
return args
def SCREAMING_SNAKE_CASE__ ( snake_case : Any ) -> str:
"""simple docstring"""
a : List[str] = {}
if args.model_type is None:
a : int = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith('rag' ):
a : str = RagTokenForGeneration if args.model_type == 'rag_token' else RagSequenceForGeneration
a : Any = args.n_docs
if args.index_name is not None:
a : List[Any] = args.index_name
if args.index_path is not None:
a : Union[str, Any] = args.index_path
else:
a : Optional[int] = BartForConditionalGeneration
a : Tuple = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info('Evaluate the following checkpoints: %s' , snake_case )
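# Select the scoring and batch-evaluation functions: 'e2e' scores the generated answers (exact match / F1),
# while 'retrieval' scores the retrieved document titles with precision@k.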
a : Optional[Any] = get_scores if args.eval_mode == 'e2e' else get_precision_at_k
a : List[str] = evaluate_batch_eae if args.eval_mode == 'e2e' else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info('Calculating metrics based on an existing predictions file: {}'.format(args.predictions_path ) )
score_fn(snake_case , args.predictions_path , args.gold_data_path )
continue
logger.info('***** Running evaluation for {} *****'.format(snake_case ) )
logger.info(' Batch size = %d' , args.eval_batch_size )
logger.info(' Predictions will be stored under {}'.format(args.predictions_path ) )
if args.model_type.startswith('rag' ):
a : List[Any] = RagRetriever.from_pretrained(snake_case , **snake_case )
a : str = model_class.from_pretrained(snake_case , retriever=snake_case , **snake_case )
model.retriever.init_retrieval()
else:
a : Dict = model_class.from_pretrained(snake_case , **snake_case )
model.to(args.device )
with open(args.evaluation_set , 'r' ) as eval_file, open(args.predictions_path , 'w' ) as preds_file:
a : str = []
for line in tqdm(snake_case ):
questions.append(line.strip() )
if len(snake_case ) == args.eval_batch_size:
a : Any = evaluate_batch_fn(snake_case , snake_case , snake_case )
preds_file.write('\n'.join(snake_case ) + '\n' )
preds_file.flush()
a : str = []
if len(snake_case ) > 0:
a : Any = evaluate_batch_fn(snake_case , snake_case , snake_case )
preds_file.write('\n'.join(snake_case ) )
preds_file.flush()
score_fn(snake_case , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
UpperCamelCase : Optional[int] = get_args()
main(args)
| 345 | '''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase : int = logging.get_logger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Dict = ["pixel_values"]
def __init__( self : Optional[Any] , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : List[str] = size if size is not None else {'shortest_edge': 2_2_4}
a : str = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
a : int = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Any = do_resize
a : Dict = size
a : Optional[Any] = resample
a : List[Any] = do_center_crop
a : List[Any] = crop_size
a : Optional[Any] = do_rescale
a : Dict = rescale_factor
a : Tuple = do_normalize
a : int = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
a : Optional[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Optional[Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
a : int = int((2_5_6 / 2_2_4) * size['shortest_edge'])
a : Optional[int] = get_resize_output_image_size(UpperCAmelCase_ , size=UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : Optional[Any] = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""")
return resize(
UpperCAmelCase_ , size=(size_dict['height'], size_dict['width']) , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
a : str = get_size_dict(UpperCAmelCase_)
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""")
return center_crop(UpperCAmelCase_ , size=(size['height'], size['width']) , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[int, float] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , UpperCAmelCase_ : Optional[TensorType] = None , UpperCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : int = do_resize if do_resize is not None else self.do_resize
a : Optional[int] = resample if resample is not None else self.resample
a : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
a : Tuple = do_rescale if do_rescale is not None else self.do_rescale
a : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
a : Dict = do_normalize if do_normalize is not None else self.do_normalize
a : Tuple = image_mean if image_mean is not None else self.image_mean
a : int = image_std if image_std is not None else self.image_std
a : Optional[int] = size if size is not None else self.size
a : Optional[Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : List[Any] = crop_size if crop_size is not None else self.crop_size
a : str = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Dict = make_list_of_images(UpperCAmelCase_)
if not valid_images(UpperCAmelCase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a : Any = [to_numpy_array(UpperCAmelCase_) for image in images]
if do_resize:
a : Optional[int] = [self.resize(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_center_crop:
a : int = [self.center_crop(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_rescale:
a : Any = [self.rescale(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_normalize:
a : str = [self.normalize(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : Optional[int] = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : Optional[int] = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_)
| 345 | 1 |
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : List[Any] = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : str = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Optional[int] = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[Any] = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : List[Any] = [
'safety_checker/pytorch_model.bin',
'safety_checker/model.safetensors',
'vae/diffusion_pytorch_model.bin',
'vae/diffusion_pytorch_model.safetensors',
'text_encoder/pytorch_model.bin',
# Removed: 'text_encoder/model.safetensors',
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Optional[Any] = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
a : List[Any] = 'fp16'
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_ , variant=UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : str = [
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
a : str = 'fp16'
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_ , variant=UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : Optional[Any] = [
'unet/diffusion_pytorch_model.bin',
'unet/diffusion_pytorch_model.safetensors',
]
a : Union[str, Any] = 'fp16'
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_ , variant=UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[Any] = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
a : Union[str, Any] = 'fp16'
self.assertFalse(is_safetensors_compatible(UpperCAmelCase_ , variant=UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[str] = [
'text_encoder/pytorch_model.fp16.bin',
'text_encoder/model.fp16.safetensors',
]
a : Optional[Any] = 'fp16'
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_ , variant=UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Optional[Any] = [
'text_encoder/pytorch_model.bin',
'text_encoder/model.safetensors',
]
a : Union[str, Any] = 'fp16'
self.assertTrue(is_safetensors_compatible(UpperCAmelCase_ , variant=UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : Optional[int] = [
'safety_checker/pytorch_model.fp16.bin',
'safety_checker/model.fp16.safetensors',
'vae/diffusion_pytorch_model.fp16.bin',
'vae/diffusion_pytorch_model.fp16.safetensors',
'text_encoder/pytorch_model.fp16.bin',
# 'text_encoder/model.fp16.safetensors',
'unet/diffusion_pytorch_model.fp16.bin',
'unet/diffusion_pytorch_model.fp16.safetensors',
]
a : Any = 'fp16'
self.assertFalse(is_safetensors_compatible(UpperCAmelCase_ , variant=UpperCAmelCase_))
| 345 | '''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : float | Decimal , snake_case : float = 10**-10 ) -> float:
"""simple docstring"""
a : Dict = a
while True:
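# Newton-Raphson update: x_{n+1} = x_n - f(x_n) / f'(x_n); sympy.diff supplies the symbolic derivative
# and Decimal keeps extra precision in the division.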
a : Any = Decimal(snake_case ) - (
Decimal(eval(snake_case ) ) / Decimal(eval(str(diff(snake_case ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(snake_case ) ) < precision: # noqa: S307
return float(snake_case )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
# Find Square Root of 5
print(f'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
| 345 | 1 |
'''simple docstring'''
import os
def SCREAMING_SNAKE_CASE__ ( snake_case : str = "input.txt" ) -> int:
"""simple docstring"""
with open(os.path.join(os.path.dirname(snake_case ) , snake_case ) ) as input_file:
a : Any = [
[int(snake_case ) for element in line.split(',' )]
for line in input_file.readlines()
]
a : Tuple = len(snake_case )
a : Optional[Any] = len(matrix[0] )
a : int = [[-1 for _ in range(snake_case )] for _ in range(snake_case )]
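# Column-by-column dynamic programming (the three-direction path of Project Euler 82): each cell is first
# reached from the left, then the column is swept top-down and bottom-up to allow moves up or down within it.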
for i in range(snake_case ):
a : Tuple = matrix[i][0]
for j in range(1 , snake_case ):
for i in range(snake_case ):
a : str = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , snake_case ):
a : Any = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
a : Any = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'''{solution() = }''')
| 345 | '''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int]=1_3 , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Optional[Any]=9_9 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : int=3_2 , UpperCAmelCase_ : Tuple=5 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Optional[int]=5_1_2 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : int="last" , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : List[str]=0 , ):
"""simple docstring"""
a : Tuple = parent
a : Optional[Any] = batch_size
a : Tuple = seq_length
a : Union[str, Any] = is_training
a : List[str] = use_input_lengths
a : Union[str, Any] = use_token_type_ids
a : Optional[int] = use_labels
a : int = gelu_activation
a : Dict = sinusoidal_embeddings
a : Any = causal
a : Optional[int] = asm
a : int = n_langs
a : List[str] = vocab_size
a : List[str] = n_special
a : List[str] = hidden_size
a : Any = num_hidden_layers
a : Union[str, Any] = num_attention_heads
a : Optional[Any] = hidden_dropout_prob
a : str = attention_probs_dropout_prob
a : Dict = max_position_embeddings
a : Union[str, Any] = type_sequence_label_size
a : str = initializer_range
a : List[Any] = num_labels
a : Union[str, Any] = num_choices
a : Optional[Any] = summary_type
a : Optional[Any] = use_proj
a : Optional[Any] = scope
a : Dict = bos_token_id
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
a : Optional[int] = None
if self.use_input_lengths:
a : Optional[int] = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
a : int = None
if self.use_token_type_ids:
a : str = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
a : Optional[Any] = None
a : Tuple = None
a : Optional[Any] = None
if self.use_labels:
a : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a : Optional[Any] = ids_tensor([self.batch_size] , 2).float()
a : Dict = ids_tensor([self.batch_size] , self.num_choices)
a : Any = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : Any = XLMModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_ , lengths=UpperCAmelCase_ , langs=UpperCAmelCase_)
a : str = model(UpperCAmelCase_ , langs=UpperCAmelCase_)
a : int = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
a : Optional[Any] = XLMWithLMHeadModel(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : Union[str, Any] = XLMForQuestionAnsweringSimple(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = model(UpperCAmelCase_)
a : Tuple = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_)
a : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Any = XLMForQuestionAnswering(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_)
a : Dict = model(
UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , p_mask=UpperCAmelCase_ , )
a : int = model(
UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , )
((a) , ) = result_with_labels.to_tuple()
a : int = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_)
((a) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , ):
"""simple docstring"""
a : Dict = XLMForSequenceClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_)
a : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Dict = self.num_labels
a : int = XLMForTokenClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , ):
"""simple docstring"""
a : str = self.num_choices
a : Dict = XLMForMultipleChoice(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : str = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Optional[int] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Any = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : int = self.prepare_config_and_inputs()
(
a , a , a , a , a , a , a , a , a ,
) = config_and_inputs
a : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : int = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
A : List[str] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
A : Optional[Any] = (
{
"feature-extraction": XLMModel,
"fill-mask": XLMWithLMHeadModel,
"question-answering": XLMForQuestionAnsweringSimple,
"text-classification": XLMForSequenceClassification,
"text-generation": XLMWithLMHeadModel,
"token-classification": XLMForTokenClassification,
"zero-shot": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict=False):
"""simple docstring"""
a : List[Any] = super()._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
a : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
a : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[Any] = XLMModelTester(self)
a : Tuple = ConfigTester(self , config_class=UpperCAmelCase_ , emb_dim=3_7)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=1):
"""simple docstring"""
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(
[isinstance(UpperCAmelCase_ , UpperCAmelCase_) for iter_attentions in attentions] , [True] * len(UpperCAmelCase_))
self.assertEqual(len(UpperCAmelCase_) , (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(UpperCAmelCase_):
# adds PAD dummy token
a : List[str] = min_length + idx + 1
a : Optional[Any] = min_length + idx + 1
a : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Union[str, Any]=1):
"""simple docstring"""
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(
[isinstance(UpperCAmelCase_ , UpperCAmelCase_) for iter_hidden_states in hidden_states] , [True] * len(UpperCAmelCase_) , )
self.assertEqual(len(UpperCAmelCase_) , (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(UpperCAmelCase_):
# adds PAD dummy token
a : int = min_length + idx + 1
a : Any = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(UpperCAmelCase_) , )
pass
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Union[str, Any] = XLMModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
@require_torch
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Dict = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
model.to(UpperCAmelCase_)
a : List[Any] = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=UpperCAmelCase_) # the president
a : Dict = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
a : Optional[Any] = model.generate(UpperCAmelCase_ , do_sample=UpperCAmelCase_)
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , UpperCAmelCase_)
| 345 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
UpperCamelCase : List[str] = logging.get_logger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : str = ["input_features", "attention_mask"]
def __init__( self : Any , UpperCAmelCase_ : Dict=8_0 , UpperCAmelCase_ : List[Any]=1_6_0_0_0 , UpperCAmelCase_ : Optional[Any]=8_0 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Union[str, Any]=True , **UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
super().__init__(feature_size=UpperCAmelCase_ , sampling_rate=UpperCAmelCase_ , padding_value=UpperCAmelCase_ , **UpperCAmelCase_)
a : Dict = num_mel_bins
a : List[str] = do_ceptral_normalize
a : Dict = normalize_means
a : int = normalize_vars
a : Any = True
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : np.ndarray , ):
"""simple docstring"""
a : str = waveform * (2**1_5) # Kaldi compliance: 16-bit signed integers
a : str = torch.from_numpy(UpperCAmelCase_).unsqueeze(0)
a : str = ta_kaldi.fbank(UpperCAmelCase_ , num_mel_bins=self.num_mel_bins , sample_frequency=self.sampling_rate)
return features.numpy()
@staticmethod
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[bool] = True , UpperCAmelCase_ : Optional[bool] = True , UpperCAmelCase_ : float = 0.0 , ):
"""simple docstring"""
if normalize_means:
a : int = x[:input_length].mean(axis=0)
a : Any = np.subtract(UpperCAmelCase_ , UpperCAmelCase_)
if normalize_vars:
a : List[str] = x[:input_length].std(axis=0)
a : Optional[int] = np.divide(UpperCAmelCase_ , UpperCAmelCase_)
if input_length < x.shape[0]:
a : List[str] = padding_value
# make sure array is in float32
a : Optional[int] = x.astype(np.floataa)
return x
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : List[np.ndarray] , UpperCAmelCase_ : Optional[np.ndarray] = None):
"""simple docstring"""
a : int = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
return [
self.utterance_cmvn(UpperCAmelCase_ , UpperCAmelCase_ , self.normalize_means , self.normalize_vars , self.padding_value)
for x, n in zip(UpperCAmelCase_ , UpperCAmelCase_)
]
def __call__( self : str , UpperCAmelCase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCAmelCase_ : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[bool] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""")
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.')
a : str = isinstance(UpperCAmelCase_ , np.ndarray) and len(raw_speech.shape) > 1
if is_batched_numpy and len(raw_speech.shape) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
a : Tuple = is_batched_numpy or (
isinstance(UpperCAmelCase_ , (list, tuple)) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list)))
)
if is_batched:
a : Optional[Any] = [np.asarray(UpperCAmelCase_ , dtype=np.floataa) for speech in raw_speech]
elif not is_batched and not isinstance(UpperCAmelCase_ , np.ndarray):
a : Tuple = np.asarray(UpperCAmelCase_ , dtype=np.floataa)
elif isinstance(UpperCAmelCase_ , np.ndarray) and raw_speech.dtype is np.dtype(np.floataa):
a : str = raw_speech.astype(np.floataa)
# always return batch
if not is_batched:
a : List[Any] = [raw_speech]
# extract fbank features
a : Any = [self._extract_fbank_features(UpperCAmelCase_) for waveform in raw_speech]
# convert into correct format for padding
a : List[str] = BatchFeature({'input_features': features})
a : List[Any] = self.pad(
UpperCAmelCase_ , padding=UpperCAmelCase_ , max_length=UpperCAmelCase_ , truncation=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , **UpperCAmelCase_ , )
# make sure list is in array format
a : Optional[int] = padded_inputs.get('input_features')
if isinstance(input_features[0] , UpperCAmelCase_):
a : List[str] = [np.asarray(UpperCAmelCase_ , dtype=np.floataa) for feature in input_features]
a : str = padded_inputs.get('attention_mask')
if attention_mask is not None:
a : Tuple = [np.asarray(UpperCAmelCase_ , dtype=np.intaa) for array in attention_mask]
# Utterance-level cepstral mean and variance normalization
if self.do_ceptral_normalize:
a : Tuple = (
np.array(UpperCAmelCase_ , dtype=np.intaa)
if self._get_padding_strategies(UpperCAmelCase_ , max_length=UpperCAmelCase_) is not PaddingStrategy.DO_NOT_PAD
else None
)
a : Tuple = self.normalize(
padded_inputs['input_features'] , attention_mask=UpperCAmelCase_)
if return_tensors is not None:
a : int = padded_inputs.convert_to_tensors(UpperCAmelCase_)
return padded_inputs
| 345 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase : List[str] = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Any = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[int] = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 345 | 1 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
UpperCamelCase : Union[str, Any] = logging.get_logger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase_ : Union[List[ControlNetModel], Tuple[ControlNetModel]]):
"""simple docstring"""
super().__init__()
a : Optional[Any] = nn.ModuleList(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : torch.FloatTensor , UpperCAmelCase_ : Union[torch.Tensor, float, int] , UpperCAmelCase_ : torch.Tensor , UpperCAmelCase_ : List[torch.tensor] , UpperCAmelCase_ : List[float] , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[torch.Tensor] = None , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = True , ):
"""simple docstring"""
for i, (image, scale, controlnet) in enumerate(zip(UpperCAmelCase_ , UpperCAmelCase_ , self.nets)):
a , a = controlnet(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
# merge samples
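# The first ControlNet initializes the residuals; each further ControlNet's down-block and mid-block
# residuals are added element-wise (every net applies its own conditioning scale inside its forward call).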
if i == 0:
a , a = down_samples, mid_sample
else:
a : Optional[int] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(UpperCAmelCase_ , UpperCAmelCase_)
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Union[str, os.PathLike] , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Callable = None , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[str] = None , ):
"""simple docstring"""
a : Any = 0
a : Dict = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
UpperCAmelCase_ , is_main_process=UpperCAmelCase_ , save_function=UpperCAmelCase_ , safe_serialization=UpperCAmelCase_ , variant=UpperCAmelCase_ , )
idx += 1
a : str = model_path_to_save + f"""_{idx}"""
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Tuple , UpperCAmelCase_ : Optional[Union[str, os.PathLike]] , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
a : Optional[int] = 0
a : Any = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
a : List[str] = pretrained_model_path
while os.path.isdir(UpperCAmelCase_):
a : Optional[int] = ControlNetModel.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_)
controlnets.append(UpperCAmelCase_)
idx += 1
a : Optional[int] = pretrained_model_path + f"""_{idx}"""
logger.info(f"""{len(UpperCAmelCase_)} controlnets loaded from {pretrained_model_path}.""")
if len(UpperCAmelCase_) == 0:
raise ValueError(
f"""No ControlNets found under {os.path.dirname(UpperCAmelCase_)}. Expected at least {pretrained_model_path + '_0'}.""")
return cls(UpperCAmelCase_)
| 345 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase : Tuple = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Union[str, Any] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
UpperCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 345 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Optional[int] = logging.get_logger(__name__)
UpperCamelCase : str = {
"""tiiuae/falcon-40b""": """https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json""",
"""tiiuae/falcon-7b""": """https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json""",
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Optional[int] = "falcon"
A : Optional[int] = ["past_key_values"]
def __init__( self : str , UpperCAmelCase_ : str=6_5_0_2_4 , UpperCAmelCase_ : List[Any]=4_5_4_4 , UpperCAmelCase_ : str=3_2 , UpperCAmelCase_ : int=7_1 , UpperCAmelCase_ : List[Any]=1e-5 , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Any=0.0 , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Optional[int]=1_1 , UpperCAmelCase_ : List[Any]=1_1 , **UpperCAmelCase_ : Tuple , ):
"""simple docstring"""
a : Optional[int] = vocab_size
# Backward compatibility with n_embed kwarg
a : Optional[int] = kwargs.pop('n_embed' , UpperCAmelCase_)
a : int = hidden_size if n_embed is None else n_embed
a : Dict = num_hidden_layers
a : Tuple = num_attention_heads
a : List[Any] = layer_norm_epsilon
a : Tuple = initializer_range
a : Any = use_cache
a : str = hidden_dropout
a : Optional[Any] = attention_dropout
a : List[str] = bos_token_id
a : List[str] = eos_token_id
a : List[Any] = num_attention_heads if num_kv_heads is None else num_kv_heads
a : Dict = alibi
a : Any = new_decoder_architecture
a : int = multi_query # Ignored when new_decoder_architecture is True
a : str = parallel_attn
a : Optional[Any] = bias
super().__init__(bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
return self.hidden_size // self.num_attention_heads
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
return not self.alibi
| 345 | '''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : str = logging.get_logger(__name__)
UpperCamelCase : List[str] = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Optional[int] = "data2vec-audio"
def __init__( self : Dict , UpperCAmelCase_ : Optional[int]=3_2 , UpperCAmelCase_ : Union[str, Any]=7_6_8 , UpperCAmelCase_ : Dict=1_2 , UpperCAmelCase_ : str=1_2 , UpperCAmelCase_ : Optional[Any]=3_0_7_2 , UpperCAmelCase_ : Dict="gelu" , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : str=1e-5 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Optional[Any]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCAmelCase_ : Dict=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_ : int=(1_0, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : List[Any]=1_6 , UpperCAmelCase_ : Optional[Any]=1_9 , UpperCAmelCase_ : int=5 , UpperCAmelCase_ : Any=0.05 , UpperCAmelCase_ : Dict=1_0 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : Tuple=1_0 , UpperCAmelCase_ : int=0 , UpperCAmelCase_ : Any="sum" , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[int]=2_5_6 , UpperCAmelCase_ : Any=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCAmelCase_ : Optional[Any]=(5, 3, 3, 1, 1) , UpperCAmelCase_ : Optional[int]=(1, 2, 3, 1, 1) , UpperCAmelCase_ : int=5_1_2 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : Tuple=1 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : List[Any]=None , **UpperCAmelCase_ : int , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_)
a : List[Any] = hidden_size
a : Any = feat_extract_activation
a : Any = list(UpperCAmelCase_)
a : Optional[int] = list(UpperCAmelCase_)
a : Dict = list(UpperCAmelCase_)
a : Tuple = conv_bias
a : str = num_conv_pos_embeddings
a : Dict = num_conv_pos_embedding_groups
a : Optional[Any] = conv_pos_kernel_size
a : Any = len(self.conv_dim)
a : Tuple = num_hidden_layers
a : Any = intermediate_size
a : Any = hidden_act
a : Dict = num_attention_heads
a : Dict = hidden_dropout
a : Union[str, Any] = attention_dropout
a : Dict = activation_dropout
a : Optional[int] = feat_proj_dropout
a : Tuple = final_dropout
a : Union[str, Any] = layerdrop
a : Tuple = layer_norm_eps
a : Dict = initializer_range
a : Tuple = vocab_size
a : int = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a : List[str] = mask_time_prob
a : int = mask_time_length
a : Optional[int] = mask_time_min_masks
a : Dict = mask_feature_prob
a : List[str] = mask_feature_length
a : str = mask_feature_min_masks
# ctc loss
a : str = ctc_loss_reduction
a : Optional[Any] = ctc_zero_infinity
# adapter
a : List[str] = add_adapter
a : Optional[Any] = adapter_kernel_size
a : int = adapter_stride
a : str = num_adapter_layers
a : Optional[Any] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
a : str = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
a : List[Any] = list(UpperCAmelCase_)
a : List[str] = list(UpperCAmelCase_)
a : str = list(UpperCAmelCase_)
a : Optional[Any] = xvector_output_dim
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
return math.prod(self.conv_stride)
| 345 | 1 |
'''simple docstring'''
UpperCamelCase : List[Any] = 256
# Modulus to hash a string
UpperCamelCase : List[Any] = 1_000_003
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str ) -> bool:
"""simple docstring"""
a : Dict = len(snake_case )
a : Dict = len(snake_case )
if p_len > t_len:
return False
a : int = 0
a : int = 0
a : Any = 1
# Calculating the hash of pattern and substring of text
for i in range(snake_case ):
a : int = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
a : int = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
a : Dict = (modulus_power * alphabet_size) % modulus
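# After this loop, modulus_power equals alphabet_size**(p_len - 1) % modulus, the weight of the window's
# leading character, which the rolling-hash update below subtracts out before shifting the window.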
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the https://en.wikipedia.org/wiki/Rolling_hash
a : List[str] = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
def SCREAMING_SNAKE_CASE__ ( ) -> None:
"""simple docstring"""
a : Union[str, Any] = 'abc1abc12'
a : Union[str, Any] = 'alskfjaldsabc1abc1abc12k23adsfabcabc'
a : List[Any] = 'alskfjaldsk23adsfabcabc'
assert rabin_karp(snake_case , snake_case ) and not rabin_karp(snake_case , snake_case )
# Test 2)
a : Optional[int] = 'ABABX'
a : Optional[int] = 'ABABZABABYABABX'
assert rabin_karp(snake_case , snake_case )
# Test 3)
a : List[Any] = 'AAAB'
a : int = 'ABAAAAAB'
assert rabin_karp(snake_case , snake_case )
# Test 4)
a : int = 'abcdabcy'
a : List[str] = 'abcxabcdabxabcdabcdabcy'
assert rabin_karp(snake_case , snake_case )
# Test 5)
a : int = 'Lü'
a : Tuple = 'Lüsai'
assert rabin_karp(snake_case , snake_case )
a : str = 'Lue'
assert not rabin_karp(snake_case , snake_case )
print('Success.' )
if __name__ == "__main__":
test_rabin_karp()
| 345 | '''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
UpperCamelCase : Optional[Any] = logging.getLogger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Tuple = "masked_bert"
def __init__( self : Tuple , UpperCAmelCase_ : List[Any]=3_0_5_2_2 , UpperCAmelCase_ : str=7_6_8 , UpperCAmelCase_ : Optional[Any]=1_2 , UpperCAmelCase_ : Optional[int]=1_2 , UpperCAmelCase_ : Union[str, Any]=3_0_7_2 , UpperCAmelCase_ : Union[str, Any]="gelu" , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Optional[int]=5_1_2 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : Optional[Any]=1e-12 , UpperCAmelCase_ : Dict=0 , UpperCAmelCase_ : Dict="topK" , UpperCAmelCase_ : str="constant" , UpperCAmelCase_ : Optional[Any]=0.0 , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
a : Union[str, Any] = vocab_size
a : List[Any] = hidden_size
a : List[str] = num_hidden_layers
a : Any = num_attention_heads
a : Optional[Any] = hidden_act
a : str = intermediate_size
a : Dict = hidden_dropout_prob
a : Any = attention_probs_dropout_prob
a : Any = max_position_embeddings
a : Dict = type_vocab_size
a : List[str] = initializer_range
a : int = layer_norm_eps
a : Dict = pruning_method
a : List[str] = mask_init
a : Union[str, Any] = mask_scale
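# Editor's sketch (not part of the original sample; field names mirror the defaults in the
# signature above): the pruning-specific entries such a config would serialize alongside the
# standard BERT fields.
import json

masked_bert_pruning_fields = {
    "model_type": "masked_bert",
    "pruning_method": "topK",   # default from the __init__ signature above
    "mask_init": "constant",
    "mask_scale": 0.0,
}
# json.dumps(masked_bert_pruning_fields, indent=2)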
| 345 | 1 |
'''simple docstring'''
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
UpperCamelCase : str = {
"""susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""",
"""susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""",
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : List[Any] = "ernie_m"
A : Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self : str , UpperCAmelCase_ : int = 2_5_0_0_0_2 , UpperCAmelCase_ : int = 7_6_8 , UpperCAmelCase_ : int = 1_2 , UpperCAmelCase_ : int = 1_2 , UpperCAmelCase_ : int = 3_0_7_2 , UpperCAmelCase_ : str = "gelu" , UpperCAmelCase_ : float = 0.1 , UpperCAmelCase_ : float = 0.1 , UpperCAmelCase_ : int = 5_1_4 , UpperCAmelCase_ : float = 0.02 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : float = 1e-05 , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : int=False , UpperCAmelCase_ : List[str]=0.0 , **UpperCAmelCase_ : Any , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
a : int = vocab_size
a : Dict = hidden_size
a : Optional[int] = num_hidden_layers
a : Any = num_attention_heads
a : Tuple = intermediate_size
a : Union[str, Any] = hidden_act
a : Optional[Any] = hidden_dropout_prob
a : Dict = attention_probs_dropout_prob
a : Union[str, Any] = max_position_embeddings
a : Dict = initializer_range
a : Optional[Any] = layer_norm_eps
a : Any = classifier_dropout
a : List[str] = is_decoder
a : Dict = act_dropout
| 345 | '''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Any):
"""simple docstring"""
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_)
a : str = {}
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Tuple , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : int):
"""simple docstring"""
a : Dict = super().add_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
if num_added_tokens == 0:
raise ValueError(
f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
' `placeholder_token` that is not already in the tokenizer.')
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , *UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str]=1 , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
a : Any = []
if num_vec_per_token == 1:
self.try_adding_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
output.append(UpperCAmelCase_)
else:
a : int = []
for i in range(UpperCAmelCase_):
a : Union[str, Any] = placeholder_token + f"""_{i}"""
self.try_adding_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
output.append(UpperCAmelCase_)
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f"""The tokenizer already has placeholder token {token} that can get confused with"""
f""" {placeholder_token}keep placeholder tokens independent""")
a : Any = output
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : str=1.0):
"""simple docstring"""
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
a : Any = []
for i in range(len(UpperCAmelCase_)):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=UpperCAmelCase_))
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
a : List[Any] = self.token_map[placeholder_token]
a : int = tokens[: 1 + int(len(UpperCAmelCase_) * prop_tokens_to_load)]
if vector_shuffle:
a : List[Any] = copy.copy(UpperCAmelCase_)
random.shuffle(UpperCAmelCase_)
a : List[str] = text.replace(UpperCAmelCase_ , ' '.join(UpperCAmelCase_))
return text
def __call__( self : Optional[int] , UpperCAmelCase_ : Any , *UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Optional[int]=1.0 , **UpperCAmelCase_ : str):
"""simple docstring"""
return super().__call__(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase_ , vector_shuffle=UpperCAmelCase_ , prop_tokens_to_load=UpperCAmelCase_) , *UpperCAmelCase_ , **UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , *UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=1.0 , **UpperCAmelCase_ : Dict):
"""simple docstring"""
return super().encode(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase_ , vector_shuffle=UpperCAmelCase_ , prop_tokens_to_load=UpperCAmelCase_) , *UpperCAmelCase_ , **UpperCAmelCase_ , )
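# Editor's sketch (not part of the original sample): the placeholder-expansion idea used by
# the tokenizer above, shown standalone. A single placeholder maps to several numbered
# sub-tokens, which are substituted back into the prompt before tokenization.
def expand_placeholder(text: str, placeholder_token: str, num_vec_per_token: int) -> str:
    sub_tokens = [f"{placeholder_token}_{i}" for i in range(num_vec_per_token)]
    return text.replace(placeholder_token, " ".join(sub_tokens))

# expand_placeholder("a photo of <cat-toy>", "<cat-toy>", 3)
# -> "a photo of <cat-toy>_0 <cat-toy>_1 <cat-toy>_2"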
| 345 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase : Tuple = logging.get_logger(__name__)
UpperCamelCase : List[Any] = torch.device("""cpu""")
def SCREAMING_SNAKE_CASE__ ( ) -> int:
"""simple docstring"""
a : Any = 'http://images.cocodataset.org/val2017/000000039769.jpg'
a : Tuple = Image.open(requests.get(snake_case , stream=snake_case ).raw )
return im
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple ) -> Any:
"""simple docstring"""
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_7_0_3E0_0, 2.1_1_0_7E0_0, -2.0_8_1_1E0_0, 8.8_6_8_5E-0_1, 2.4_3_6_0E-0_1] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_6_3_6E-0_1, 2.3_4_7_8E-0_1, -1.6_9_6_3E0_0, -1.7_3_8_1E0_0, -8.6_3_3_7E-0_1] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_7_6_8E-0_1, -4.7_4_2_9E-0_1, -1.0_8_9_7E0_0, -1.0_2_4_8E0_0, 3.5_5_2_3E-0_2] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_3_3_0E-0_1, 2.4_2_1_1E-0_1, -6.0_1_8_5E-0_1, -8.2_7_8_9E-0_1, -6.0_4_4_6E-0_2] )
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : int , snake_case : Dict ) -> List[Any]:
"""simple docstring"""
a : List[Any] = dct.pop(snake_case )
a : Dict = val
def SCREAMING_SNAKE_CASE__ ( snake_case : Any ) -> Tuple:
"""simple docstring"""
a : Any = []
for k in state_dict.keys():
a : List[str] = k
if ".pwconv" in k:
a : Optional[int] = k_new.replace('.pwconv' , '.point_wise_conv' )
if ".dwconv" in k:
a : Dict = k_new.replace('.dwconv' , '.depth_wise_conv' )
if ".Proj." in k:
a : int = k_new.replace('.Proj.' , '.proj.' )
if "patch_embed" in k_new:
a : List[Any] = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
if "network" in k_new:
a : Any = k_new.split('.' )
if ls[2].isdigit():
a : Any = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
else:
a : Union[str, Any] = k_new.replace('network' , 'swiftformer.encoder.network' )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : Union[str, Any] , snake_case : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
a : Optional[Any] = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
a : Any = 1_000
a : Dict = 'huggingface/label-files'
a : Any = 'imagenet-1k-id2label.json'
a : str = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='dataset' ) , 'r' ) )
a : Dict = {int(snake_case ): v for k, v in idalabel.items()}
a : List[Any] = idalabel
a : List[Any] = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
a : int = [3, 3, 6, 4]
a : List[Any] = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
a : str = [3, 3, 9, 6]
a : int = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
a : List[str] = [4, 3, 10, 5]
a : int = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
a : Any = [4, 4, 12, 6]
a : Optional[int] = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('https' ):
a : Any = torch.hub.load_state_dict_from_url(snake_case , map_location='cpu' , check_hash=snake_case )
else:
a : List[str] = torch.load(snake_case , map_location='cpu' )
a : List[Any] = checkpoint
a : List[str] = create_rename_keys(snake_case )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(snake_case , snake_case , snake_case )
# load HuggingFace model
a : Union[str, Any] = SwiftFormerForImageClassification(snake_case ).eval()
hf_model.load_state_dict(snake_case )
# prepare test inputs
a : List[Any] = prepare_img()
a : Dict = ViTImageProcessor.from_pretrained('preprocessor_config' )
a : str = processor(images=snake_case , return_tensors='pt' )
# compare outputs from both models
a : Any = get_expected_output(snake_case )
a : str = hf_model(inputs['pixel_values'] ).logits
assert hf_logits.shape == torch.Size([1, 1_000] )
assert torch.allclose(hf_logits[0, 0:5] , snake_case , atol=1E-3 )
Path(snake_case ).mkdir(exist_ok=snake_case )
print(F"""Saving model {swiftformer_name} to {pytorch_dump_folder_path}""" )
hf_model.save_pretrained(snake_case )
if __name__ == "__main__":
UpperCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
UpperCamelCase : Tuple = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
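# Example invocation (editor's sketch; the script filename and checkpoint path are
# placeholders, not taken from the original sample):
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt ./swiftformer_xs.pth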
| 345 | '''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> Optional[Any]:
"""simple docstring"""
a : Union[str, Any] = SwinConfig()
a : Optional[int] = swin_name.split('_' )
a : Union[str, Any] = name_split[1]
a : Dict = int(name_split[4] )
a : Union[str, Any] = int(name_split[3][-1] )
if model_size == "tiny":
a : Optional[Any] = 96
a : Any = (2, 2, 6, 2)
a : List[str] = (3, 6, 12, 24)
elif model_size == "small":
a : int = 96
a : List[str] = (2, 2, 18, 2)
a : int = (3, 6, 12, 24)
elif model_size == "base":
a : Tuple = 128
a : Optional[int] = (2, 2, 18, 2)
a : List[Any] = (4, 8, 16, 32)
else:
a : Dict = 192
a : str = (2, 2, 18, 2)
a : List[Any] = (6, 12, 24, 48)
if "in22k" in swin_name:
a : Any = 21_841
else:
a : str = 1_000
a : str = 'huggingface/label-files'
a : Optional[Any] = 'imagenet-1k-id2label.json'
a : Dict = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='dataset' ) , 'r' ) )
a : Tuple = {int(snake_case ): v for k, v in idalabel.items()}
a : int = idalabel
a : str = {v: k for k, v in idalabel.items()}
a : Dict = img_size
a : List[Any] = num_classes
a : str = embed_dim
a : Dict = depths
a : Union[str, Any] = num_heads
a : int = window_size
return config
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
if "patch_embed.proj" in name:
a : int = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
a : Tuple = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
a : Optional[int] = 'encoder.' + name
if "attn.proj" in name:
a : List[Any] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
a : Tuple = name.replace('attn' , 'attention.self' )
if "norm1" in name:
a : Optional[int] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
a : Dict = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
a : Union[str, Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
a : Any = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
a : Union[str, Any] = 'layernorm.weight'
if name == "norm.bias":
a : List[str] = 'layernorm.bias'
if "head" in name:
a : Union[str, Any] = name.replace('head' , 'classifier' )
else:
a : List[Any] = 'swin.' + name
return name
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple ) -> List[str]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
a : Any = orig_state_dict.pop(snake_case )
if "mask" in key:
continue
elif "qkv" in key:
a : Optional[Any] = key.split('.' )
a : Dict = int(key_split[1] )
a : Optional[int] = int(key_split[3] )
a : Tuple = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
a : Optional[Any] = val[:dim, :]
a : List[Any] = val[
dim : dim * 2, :
]
a : List[Any] = val[-dim:, :]
else:
a : Dict = val[
:dim
]
a : Union[str, Any] = val[
dim : dim * 2
]
a : Union[str, Any] = val[
-dim:
]
else:
a : Dict = val
return orig_state_dict
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : Dict ) -> List[str]:
"""simple docstring"""
a : Any = timm.create_model(snake_case , pretrained=snake_case )
timm_model.eval()
a : str = get_swin_config(snake_case )
a : Optional[int] = SwinForImageClassification(snake_case )
model.eval()
a : Union[str, Any] = convert_state_dict(timm_model.state_dict() , snake_case )
model.load_state_dict(snake_case )
a : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
a : Optional[Any] = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
a : str = Image.open(requests.get(snake_case , stream=snake_case ).raw )
a : Union[str, Any] = image_processor(images=snake_case , return_tensors='pt' )
a : int = timm_model(inputs['pixel_values'] )
a : Optional[int] = model(**snake_case ).logits
assert torch.allclose(snake_case , snake_case , atol=1E-3 )
print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case )
if __name__ == "__main__":
UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
UpperCamelCase : Optional[Any] = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
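# Example invocation (editor's sketch; the script filename is a placeholder, not taken
# from the original sample):
#   python convert_swin_timm_to_pytorch.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224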
| 345 | 1 |
'''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """simple docstring"""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
# Find Square Root of 5
print(f'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
| 345 | '''simple docstring'''
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class UpperCamelCase :
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase_ : Tuple):
"""simple docstring"""
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
a : Dict = deepcopy(UpperCAmelCase_)
elif os.path.exists(UpperCAmelCase_):
with io.open(UpperCAmelCase_ , 'r' , encoding='utf-8') as f:
a : Union[str, Any] = json.load(UpperCAmelCase_)
else:
try:
                a : Union[str, Any] = base64.urlsafe_b64decode(UpperCAmelCase_).decode('utf-8')
a : List[str] = json.loads(UpperCAmelCase_)
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""")
a : Optional[int] = config
self.set_stage_and_offload()
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : str = self.get_value('zero_optimization.stage' , -1)
# offload
a : Any = False
if self.is_zeroa() or self.is_zeroa():
            a : Tuple = {'cpu', 'nvme'}
            a : int = {
                self.get_value('zero_optimization.offload_optimizer.device'),
                self.get_value('zero_optimization.offload_param.device'),
            }
if len(offload_devices & offload_devices_valid) > 0:
a : List[str] = True
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : List[str] = self.config
# find the config node of interest if it exists
a : int = ds_key_long.split('.')
a : Union[str, Any] = nodes.pop()
for node in nodes:
a : Union[str, Any] = config.get(UpperCAmelCase_)
if config is None:
return None, ds_key
return config, ds_key
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int=None):
"""simple docstring"""
a , a : int = self.find_config_node(UpperCAmelCase_)
if config is None:
return default
return config.get(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any=False):
"""simple docstring"""
a : Any = self.config
# find the config node of interest if it exists
a : Optional[Any] = ds_key_long.split('.')
for node in nodes:
a : List[str] = config
a : int = config.get(UpperCAmelCase_)
if config is None:
if must_exist:
raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""")
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : str):
"""simple docstring"""
a : List[str] = self.get_value(UpperCAmelCase_)
return False if value is None else bool(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : List[Any] = self.get_value(UpperCAmelCase_)
return False if value is None else not bool(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return self._stage == 2
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return self._stage == 3
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
return self._offload
class UpperCamelCase :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : int):
"""simple docstring"""
a : Union[str, Any] = engine
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
self.engine.backward(UpperCAmelCase_ , **UpperCAmelCase_)
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any):
"""simple docstring"""
super().__init__(UpperCAmelCase_ , device_placement=UpperCAmelCase_ , scaler=UpperCAmelCase_)
a : List[str] = hasattr(self.optimizer , 'overflow')
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Dict=None):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
if self.__has_overflow__:
return self.optimizer.overflow
return False
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
super().__init__(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int]=0.0_01 , UpperCAmelCase_ : List[Any]=0 , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : int = params
a : str = lr
a : Tuple = weight_decay
a : Dict = kwargs
class UpperCamelCase :
"""simple docstring"""
def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Union[str, Any]=0 , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : str = optimizer
a : Tuple = total_num_steps
a : Optional[Any] = warmup_num_steps
a : List[str] = kwargs
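# Editor's sketch (not part of the original sample; hypothetical names): a standalone
# re-statement of the dotted-key lookup implemented by the config wrapper above, shown on a
# plain dict instead of the wrapped DeepSpeed config.
def get_nested_value(config: dict, ds_key_long: str, default=None):
    node = config
    *parent_keys, leaf = ds_key_long.split(".")
    for key in parent_keys:
        node = node.get(key)
        if node is None:
            return default
    return node.get(leaf, default)

# get_nested_value({"zero_optimization": {"stage": 3}}, "zero_optimization.stage")        # -> 3
# get_nested_value({"zero_optimization": {"stage": 3}}, "optimizer.params.lr", "auto")    # -> "auto"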
| 345 | 1 |
'''simple docstring'''
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
UpperCamelCase : Union[str, Any] = namedtuple(
"""_TestCommandArgs""",
[
"""dataset""",
"""name""",
"""cache_dir""",
"""data_dir""",
"""all_configs""",
"""save_infos""",
"""ignore_verifications""",
"""force_redownload""",
"""clear_cache""",
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source: float, target: float) -> bool:
"""simple docstring"""
return (abs(source - target ) / target) < 0.01
@pytest.mark.integration
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] ) -> Optional[int]:
"""simple docstring"""
a : Union[str, Any] = _TestCommandArgs(dataset=snake_case , all_configs=snake_case , save_infos=snake_case )
a : Dict = TestCommand(*snake_case )
test_command.run()
a : str = os.path.join(snake_case , 'README.md' )
assert os.path.exists(snake_case )
a : Dict = DatasetInfosDict.from_directory(snake_case )
a : str = DatasetInfosDict(
{
'default': DatasetInfo(
features=Features(
{
'tokens': Sequence(Value('string' ) ),
'ner_tags': Sequence(
ClassLabel(names=['O', 'B-PER', 'I-PER', 'B-ORG', 'I-ORG', 'B-LOC', 'I-LOC'] ) ),
'langs': Sequence(Value('string' ) ),
'spans': Sequence(Value('string' ) ),
} ) , splits=[
{
'name': 'train',
'num_bytes': 2_351_563,
'num_examples': 10_000,
},
{
'name': 'validation',
'num_bytes': 238_418,
'num_examples': 1_000,
},
] , download_size=3_940_680 , dataset_size=2_589_981 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result , expected = getattr(dataset_infos['default'] , key), getattr(expected_dataset_infos['default'] , key)
        if key == "num_bytes":
            assert is_apercent_close(result , expected)
        elif key == "splits":
            assert list(result) == list(expected)
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
        else:
            assert result == expected
| 345 | '''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
UpperCamelCase : List[str] = logging.get_logger(__name__)
@dataclass
class UpperCamelCase :
"""simple docstring"""
A : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
A : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
A : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A : bool = field(
default=a_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Union[str, Any] = self.task_name.lower()
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : int = "train"
A : Tuple = "dev"
A : List[Any] = "test"
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : GlueDataTrainingArguments
A : str
A : List[InputFeatures]
def __init__( self : Tuple , UpperCAmelCase_ : GlueDataTrainingArguments , UpperCAmelCase_ : PreTrainedTokenizerBase , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Union[str, Split] = Split.train , UpperCAmelCase_ : Optional[str] = None , ):
"""simple docstring"""
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , UpperCAmelCase_ , )
a : Dict = args
a : int = glue_processors[args.task_name]()
a : int = glue_output_modes[args.task_name]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
try:
a : str = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name')
# Load data features from cache or dataset file
a : List[str] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
a : Tuple = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
a , a : str = label_list[2], label_list[1]
a : int = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
a : Union[str, Any] = cached_features_file + '.lock'
with FileLock(UpperCAmelCase_):
if os.path.exists(UpperCAmelCase_) and not args.overwrite_cache:
a : Optional[Any] = time.time()
a : Optional[Any] = torch.load(UpperCAmelCase_)
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start)
else:
logger.info(f"""Creating features from dataset file at {args.data_dir}""")
if mode == Split.dev:
a : List[Any] = self.processor.get_dev_examples(args.data_dir)
elif mode == Split.test:
a : Optional[Any] = self.processor.get_test_examples(args.data_dir)
else:
a : List[str] = self.processor.get_train_examples(args.data_dir)
if limit_length is not None:
a : Dict = examples[:limit_length]
a : List[Any] = glue_convert_examples_to_features(
UpperCAmelCase_ , UpperCAmelCase_ , max_length=args.max_seq_length , label_list=UpperCAmelCase_ , output_mode=self.output_mode , )
a : Dict = time.time()
torch.save(self.features , UpperCAmelCase_)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""")
def __len__( self : Tuple):
"""simple docstring"""
return len(self.features)
def __getitem__( self : Optional[int] , UpperCAmelCase_ : List[str]):
"""simple docstring"""
return self.features[i]
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return self.label_list
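# Editor's sketch (not part of the original sample): the cache-or-build pattern used by the
# dataset above, reduced to its core. The FileLock ensures only one process builds the cache
# while the others wait and then load it.
import os

import torch
from filelock import FileLock


def load_or_build_features(cached_features_file: str, build_fn):
    with FileLock(cached_features_file + ".lock"):
        if os.path.exists(cached_features_file):
            return torch.load(cached_features_file)
        features = build_fn()
        torch.save(features, cached_features_file)
        return features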
| 345 | 1 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
UpperCamelCase : Optional[int] = {"""vocab_file""": """spiece.model"""}
UpperCamelCase : Optional[int] = {
"""vocab_file""": {
"""TsinghuaAI/CPM-Generate""": """https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model""",
}
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Dict , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int=False , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : List[Any]="<s>" , UpperCAmelCase_ : str="</s>" , UpperCAmelCase_ : Optional[int]="<unk>" , UpperCAmelCase_ : Any="<sep>" , UpperCAmelCase_ : Optional[int]="<pad>" , UpperCAmelCase_ : Optional[Any]="<cls>" , UpperCAmelCase_ : Optional[Any]="<mask>" , UpperCAmelCase_ : Optional[Any]=["<eop>", "<eod>"] , UpperCAmelCase_ : Optional[Dict[str, Any]] = None , **UpperCAmelCase_ : Dict , ):
"""simple docstring"""
a : List[str] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token
a : int = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=UpperCAmelCase_ , remove_space=UpperCAmelCase_ , keep_accents=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , )
a : str = 3
a : str = do_lower_case
a : List[Any] = remove_space
a : List[Any] = keep_accents
a : Dict = vocab_file
a : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(UpperCAmelCase_)
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
'You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '
'See https://pypi.org/project/jieba/ for installation.')
a : str = jieba
a : List[Any] = str.maketrans(' \n' , '\u2582\u2583')
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return len(self.sp_model)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : int = {self.convert_ids_to_tokens(UpperCAmelCase_): i for i in range(self.vocab_size)}
vocab.update(self.added_tokens_encoder)
return vocab
def __getstate__( self : Any):
"""simple docstring"""
a : List[Any] = self.__dict__.copy()
a : Any = None
return state
def __setstate__( self : Union[str, Any] , UpperCAmelCase_ : str):
"""simple docstring"""
a : List[str] = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs'):
a : Any = {}
a : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs)
self.sp_model.Load(self.vocab_file)
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Dict):
"""simple docstring"""
if self.remove_space:
a : int = ' '.join(inputs.strip().split())
else:
a : Union[str, Any] = inputs
a : Any = outputs.replace('``' , '"').replace('\'\'' , '"')
if not self.keep_accents:
a : Dict = unicodedata.normalize('NFKD' , UpperCAmelCase_)
a : List[str] = ''.join([c for c in outputs if not unicodedata.combining(UpperCAmelCase_)])
if self.do_lower_case:
a : Union[str, Any] = outputs.lower()
return outputs
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : str):
"""simple docstring"""
a : Optional[Any] = self.preprocess_text(UpperCAmelCase_)
a : Tuple = self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_)
a : Optional[int] = []
for piece in pieces:
if len(UpperCAmelCase_) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
a : Optional[int] = self.sp_model.EncodeAsPieces(piece[:-1].replace(UpperCAmelCase_ , ''))
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0]) == 1:
a : Any = cur_pieces[1:]
else:
a : Tuple = cur_pieces[0][1:]
cur_pieces.append(piece[-1])
new_pieces.extend(UpperCAmelCase_)
else:
new_pieces.append(UpperCAmelCase_)
return new_pieces
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
return self.sp_model.PieceToId(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Any):
"""simple docstring"""
return self.sp_model.IdToPiece(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : int = ''.join(UpperCAmelCase_).replace(UpperCAmelCase_ , ' ').strip()
return out_string
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
"""simple docstring"""
a : Optional[int] = [self.sep_token_id]
a : int = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_)
if token_ids_a is not None:
return ([0] * len(UpperCAmelCase_)) + [1] + ([0] * len(UpperCAmelCase_)) + [1, 1]
return ([0] * len(UpperCAmelCase_)) + [1, 1]
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
"""simple docstring"""
a : List[str] = [self.sep_token_id]
a : str = [2]
if token_ids_a is None:
return len(token_ids_a + sep) * [0] + cls_segment_id
return len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] + cls_segment_id
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None):
"""simple docstring"""
if not os.path.isdir(UpperCAmelCase_):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
return
a : Union[str, Any] = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(UpperCAmelCase_) and os.path.isfile(self.vocab_file):
copyfile(self.vocab_file , UpperCAmelCase_)
elif not os.path.isfile(self.vocab_file):
with open(UpperCAmelCase_ , 'wb') as fi:
a : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase_)
return (out_vocab_file,)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Dict):
"""simple docstring"""
a : Dict = super()._decode(*UpperCAmelCase_ , **UpperCAmelCase_)
a : Union[str, Any] = text.replace(' ' , '').replace('\u2582' , ' ').replace('\u2583' , '\n')
return text
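# Editor's sketch (not part of the original sample): the whitespace remapping defined by the
# translation table above, shown standalone. Spaces become U+2582 and newlines U+2583 in the
# jieba-segmented text before SentencePiece sees it, and the mapping is undone on decode.
translator = str.maketrans(" \n", "\u2582\u2583")
encoded = "你好 世界\n".translate(translator)                          # spaces/newlines remapped
decoded = encoded.replace("\u2582", " ").replace("\u2583", "\n")      # original whitespace restored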
| 345 | '''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase : Dict = logging.get_logger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Any = ["pixel_values"]
def __init__( self : str , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : str = size if size is not None else {'shortest_edge': 2_5_6}
a : Dict = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
a : int = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Any = do_resize
a : List[str] = size
a : Union[str, Any] = resample
a : int = do_center_crop
a : Optional[int] = crop_size
a : Tuple = do_rescale
a : int = rescale_factor
a : Optional[Any] = do_normalize
a : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
a : Optional[int] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
a : Union[str, Any] = get_resize_output_image_size(UpperCAmelCase_ , size=size['shortest_edge'] , default_to_square=UpperCAmelCase_)
return resize(UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
a : List[str] = get_size_dict(UpperCAmelCase_)
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
return center_crop(UpperCAmelCase_ , size=(size['height'], size['width']) , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : float , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
a : int = do_resize if do_resize is not None else self.do_resize
a : int = size if size is not None else self.size
a : Union[str, Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = resample if resample is not None else self.resample
a : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
a : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
a : Dict = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : str = do_rescale if do_rescale is not None else self.do_rescale
a : int = rescale_factor if rescale_factor is not None else self.rescale_factor
a : str = do_normalize if do_normalize is not None else self.do_normalize
a : List[str] = image_mean if image_mean is not None else self.image_mean
a : Optional[int] = image_std if image_std is not None else self.image_std
a : Dict = make_list_of_images(UpperCAmelCase_)
if not valid_images(UpperCAmelCase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a : List[Any] = [to_numpy_array(UpperCAmelCase_) for image in images]
if do_resize:
a : Dict = [self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_) for image in images]
if do_center_crop:
a : Any = [self.center_crop(image=UpperCAmelCase_ , size=UpperCAmelCase_) for image in images]
if do_rescale:
a : Optional[int] = [self.rescale(image=UpperCAmelCase_ , scale=UpperCAmelCase_) for image in images]
if do_normalize:
a : Dict = [self.normalize(image=UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_) for image in images]
a : List[Any] = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : List[str] = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Tuple] = None):
"""simple docstring"""
a : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase_) != len(UpperCAmelCase_):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(UpperCAmelCase_):
a : Optional[Any] = target_sizes.numpy()
a : List[str] = []
for idx in range(len(UpperCAmelCase_)):
a : Optional[Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=UpperCAmelCase_)
a : Union[str, Any] = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(UpperCAmelCase_)
else:
a : Optional[int] = logits.argmax(dim=1)
a : List[str] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
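# Editor's sketch (not part of the original sample; dummy shapes): the core of the
# semantic-segmentation post-processing above — per-image bilinear upsampling of the logits
# to the target size, followed by an argmax over the class dimension.
import torch

dummy_logits = torch.randn(1, 19, 128, 128)  # (batch, num_labels, height, width)
upsampled = torch.nn.functional.interpolate(
    dummy_logits, size=(512, 512), mode="bilinear", align_corners=False
)
segmentation_map = upsampled.argmax(dim=1)[0]  # (512, 512) tensor of label ids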
| 345 | 1 |
'''simple docstring'''
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class UpperCamelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : int = AutoencoderKL
A : Tuple = "sample"
A : List[Any] = 1E-2
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Tuple = 4
a : Any = 3
a : Optional[int] = (3_2, 3_2)
a : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes).to(UpperCAmelCase_)
return {"sample": image}
@property
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
return (3, 3_2, 3_2)
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
return (3, 3_2, 3_2)
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : Tuple = {
'block_out_channels': [3_2, 6_4],
'in_channels': 3,
'out_channels': 3,
'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
'latent_channels': 4,
}
a : List[Any] = self.dummy_input
return init_dict, inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
pass
@unittest.skipIf(torch_device == 'mps' , 'Gradient checkpointing skipped on MPS')
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a , a : List[str] = self.prepare_init_args_and_inputs_for_common()
a : Optional[Any] = self.model_class(**UpperCAmelCase_)
model.to(UpperCAmelCase_)
assert not model.is_gradient_checkpointing and model.training
a : List[str] = model(**UpperCAmelCase_).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
a : Union[str, Any] = torch.randn_like(UpperCAmelCase_)
a : str = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
a : Dict = self.model_class(**UpperCAmelCase_)
# clone model
model_a.load_state_dict(model.state_dict())
model_a.to(UpperCAmelCase_)
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
a : int = model_a(**UpperCAmelCase_).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
a : Tuple = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5)
a : Tuple = dict(model.named_parameters())
a : int = dict(model_a.named_parameters())
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a , a : Union[str, Any] = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy' , output_loading_info=UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
self.assertEqual(len(loading_info['missing_keys']) , 0)
model.to(UpperCAmelCase_)
a : str = model(**self.dummy_input)
assert image is not None, "Make sure output is not None"
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : Tuple = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy')
a : List[Any] = model.to(UpperCAmelCase_)
model.eval()
if torch_device == "mps":
a : List[Any] = torch.manual_seed(0)
else:
a : Tuple = torch.Generator(device=UpperCAmelCase_).manual_seed(0)
a : Any = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0) , )
a : Tuple = image.to(UpperCAmelCase_)
with torch.no_grad():
a : Any = model(UpperCAmelCase_ , sample_posterior=UpperCAmelCase_ , generator=UpperCAmelCase_).sample
a : str = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
a : Optional[Any] = torch.tensor(
[
-4.0_078e-01,
-3.8_323e-04,
-1.2_681e-01,
-1.1_462e-01,
2.0_095e-01,
1.0_893e-01,
-8.8_247e-02,
-3.0_361e-01,
-9.8_644e-03,
])
elif torch_device == "cpu":
a : Tuple = torch.tensor(
[-0.13_52, 0.08_78, 0.04_19, -0.08_18, -0.10_69, 0.06_88, -0.14_58, -0.44_46, -0.00_26])
else:
a : List[str] = torch.tensor(
[-0.24_21, 0.46_42, 0.25_07, -0.04_38, 0.06_82, 0.31_60, -0.20_18, -0.07_27, 0.24_85])
self.assertTrue(torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , rtol=1e-2))
@slow
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str):
"""simple docstring"""
return f"""gaussian_noise_s={seed}_shape={'_'.join([str(UpperCAmelCase_) for s in shape])}.npy"""
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : List[str]=0 , UpperCAmelCase_ : str=(4, 3, 5_1_2, 5_1_2) , UpperCAmelCase_ : Tuple=False):
"""simple docstring"""
a : Optional[Any] = torch.floataa if fpaa else torch.floataa
a : str = torch.from_numpy(load_hf_numpy(self.get_file_format(UpperCAmelCase_ , UpperCAmelCase_))).to(UpperCAmelCase_).to(UpperCAmelCase_)
return image
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : List[str]="CompVis/stable-diffusion-v1-4" , UpperCAmelCase_ : int=False):
"""simple docstring"""
a : Any = 'fp16' if fpaa else None
a : Tuple = torch.floataa if fpaa else torch.floataa
a : Union[str, Any] = AutoencoderKL.from_pretrained(
UpperCAmelCase_ , subfolder='vae' , torch_dtype=UpperCAmelCase_ , revision=UpperCAmelCase_ , )
model.to(UpperCAmelCase_).eval()
return model
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Dict=0):
"""simple docstring"""
if torch_device == "mps":
return torch.manual_seed(UpperCAmelCase_)
return torch.Generator(device=UpperCAmelCase_).manual_seed(UpperCAmelCase_)
@parameterized.expand(
[
# fmt: off
[3_3, [-0.16_03, 0.98_78, -0.04_95, -0.07_90, -0.27_09, 0.83_75, -0.20_60, -0.08_24], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[4_7, [-0.23_76, 0.11_68, 0.13_32, -0.48_40, -0.25_08, -0.07_91, -0.04_93, -0.40_89], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
])
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple):
"""simple docstring"""
a : str = self.get_sd_vae_model()
a : str = self.get_sd_image(UpperCAmelCase_)
a : Optional[int] = self.get_generator(UpperCAmelCase_)
with torch.no_grad():
a : Any = model(UpperCAmelCase_ , generator=UpperCAmelCase_ , sample_posterior=UpperCAmelCase_).sample
assert sample.shape == image.shape
a : int = sample[-1, -2:, -2:, :2].flatten().float().cpu()
a : Optional[int] = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice)
assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=3e-3)
@parameterized.expand(
[
# fmt: off
[3_3, [-0.05_13, 0.02_89, 1.37_99, 0.21_66, -0.25_73, -0.08_71, 0.51_03, -0.09_99]],
[4_7, [-0.41_28, -0.13_20, -0.37_04, 0.19_65, -0.41_16, -0.23_32, -0.33_40, 0.22_47]],
# fmt: on
])
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str]):
"""simple docstring"""
a : Optional[int] = self.get_sd_vae_model(fpaa=UpperCAmelCase_)
a : List[str] = self.get_sd_image(UpperCAmelCase_ , fpaa=UpperCAmelCase_)
a : List[str] = self.get_generator(UpperCAmelCase_)
with torch.no_grad():
a : int = model(UpperCAmelCase_ , generator=UpperCAmelCase_ , sample_posterior=UpperCAmelCase_).sample
assert sample.shape == image.shape
a : List[Any] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
a : Union[str, Any] = torch.tensor(UpperCAmelCase_)
assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=1e-2)
@parameterized.expand(
[
# fmt: off
[3_3, [-0.16_09, 0.98_66, -0.04_87, -0.07_77, -0.27_16, 0.83_68, -0.20_55, -0.08_14], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[4_7, [-0.23_77, 0.11_47, 0.13_33, -0.48_41, -0.25_06, -0.08_05, -0.04_91, -0.40_85], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
])
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple):
"""simple docstring"""
a : str = self.get_sd_vae_model()
a : Tuple = self.get_sd_image(UpperCAmelCase_)
with torch.no_grad():
a : Any = model(UpperCAmelCase_).sample
assert sample.shape == image.shape
a : Tuple = sample[-1, -2:, -2:, :2].flatten().float().cpu()
a : Optional[Any] = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice)
assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=3e-3)
@parameterized.expand(
[
# fmt: off
[1_3, [-0.20_51, -0.18_03, -0.23_11, -0.21_14, -0.32_92, -0.35_74, -0.29_53, -0.33_23]],
[3_7, [-0.26_32, -0.26_25, -0.21_99, -0.27_41, -0.45_39, -0.49_90, -0.37_20, -0.49_25]],
# fmt: on
])
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
a : Optional[Any] = self.get_sd_vae_model()
a : Optional[int] = self.get_sd_image(UpperCAmelCase_ , shape=(3, 4, 6_4, 6_4))
with torch.no_grad():
a : int = model.decode(UpperCAmelCase_).sample
assert list(sample.shape) == [3, 3, 5_1_2, 5_1_2]
a : Optional[Any] = sample[-1, -2:, :2, -2:].flatten().cpu()
a : Tuple = torch.tensor(UpperCAmelCase_)
assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=1e-3)
@parameterized.expand(
[
# fmt: off
[2_7, [-0.03_69, 0.02_07, -0.07_76, -0.06_82, -0.17_47, -0.19_30, -0.14_65, -0.20_39]],
[1_6, [-0.16_28, -0.21_34, -0.27_47, -0.26_42, -0.37_74, -0.44_04, -0.36_87, -0.42_77]],
# fmt: on
])
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : Tuple = self.get_sd_vae_model(fpaa=UpperCAmelCase_)
a : int = self.get_sd_image(UpperCAmelCase_ , shape=(3, 4, 6_4, 6_4) , fpaa=UpperCAmelCase_)
with torch.no_grad():
a : List[str] = model.decode(UpperCAmelCase_).sample
assert list(sample.shape) == [3, 3, 5_1_2, 5_1_2]
a : Optional[int] = sample[-1, -2:, :2, -2:].flatten().float().cpu()
a : str = torch.tensor(UpperCAmelCase_)
assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=5e-3)
@parameterized.expand([(1_3,), (1_6,), (2_7,)])
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.')
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Any):
"""simple docstring"""
a : Any = self.get_sd_vae_model(fpaa=UpperCAmelCase_)
a : Optional[int] = self.get_sd_image(UpperCAmelCase_ , shape=(3, 4, 6_4, 6_4) , fpaa=UpperCAmelCase_)
with torch.no_grad():
a : Dict = model.decode(UpperCAmelCase_).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
a : Optional[Any] = model.decode(UpperCAmelCase_).sample
assert list(sample.shape) == [3, 3, 5_1_2, 5_1_2]
assert torch_all_close(UpperCAmelCase_ , UpperCAmelCase_ , atol=1e-1)
@parameterized.expand([(1_3,), (1_6,), (3_7,)])
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason='xformers is not required when using PyTorch 2.0.')
    def SCREAMING_SNAKE_CASE_ ( self : List[str] , seed : int):
        """simple docstring"""
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed , shape=(3, 4, 6_4, 6_4))
        with torch.no_grad():
            sample = model.decode(encoding).sample
        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_a = model.decode(encoding).sample
        assert list(sample.shape) == [3, 3, 5_1_2, 5_1_2]
        assert torch_all_close(sample , sample_a , atol=1e-2)
@parameterized.expand(
[
# fmt: off
[3_3, [-0.30_01, 0.09_18, -2.69_84, -3.97_20, -3.20_99, -5.03_53, 1.73_38, -0.20_65, 3.42_67]],
[4_7, [-1.50_30, -4.38_71, -6.03_55, -9.11_57, -1.66_61, -2.78_53, 2.16_07, -5.08_23, 2.56_33]],
# fmt: on
])
    def SCREAMING_SNAKE_CASE_ ( self : Any , seed : int , expected_slice : list):
        """simple docstring"""
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)
        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)
        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)
        tolerance = 3e-3 if torch_device != 'mps' else 1e-2
        assert torch_all_close(output_slice , expected_output_slice , atol=tolerance)
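# NOTE: the asserts in this class rely on a `torch_all_close` helper that this
# excerpt does not show (in diffusers it is imported from the test utilities).
# A minimal stand-in, assuming only torch, would be:
#
#     def torch_all_close(a, b, rtol=1e-05, atol=1e-08):
#         if not torch.allclose(a, b, rtol=rtol, atol=atol):
#             raise AssertionError(f"Max diff is {(a - b).abs().max()}")
#         return True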
| 345 | '''simple docstring'''
from __future__ import annotations
def p_series ( nth_term : int | float | str , power : int | float | str ) -> list[str]:
    """simple docstring"""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series : list[str] = []
    for temp in range(int(nth_term ) ):
        series.append(F"""1 / {pow(temp + 1 , int(power ) )}""" if series else '1' )
    return series
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    nth_term = int(input("""Enter the last number (nth term) of the P-Series"""))
    power = int(input("""Enter the power for P-Series"""))
    print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
    print(p_series(nth_term, power))
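# usage sketch for p_series above (complements the doctest runner already wired in):
# p_series(4, 2)   -> ['1', '1 / 4', '1 / 9', '1 / 16']
# p_series("", 10) -> ['']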
| 345 | 1 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
    def setUp ( self : List[Any]):
"""simple docstring"""
        self.checkpoint = 'ylacombe/bark-small'
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = 'en_speaker_1'
        self.input_string = 'This is a test string'
        self.speaker_embeddings_dict_path = 'speaker_embeddings_path.json'
        self.speaker_embeddings_directory = 'speaker_embeddings'
    def get_tokenizer ( self : Tuple , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **UpperCAmelCase_)
    def tearDown ( self : List[str]):
"""simple docstring"""
shutil.rmtree(self.tmpdirname)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab())
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        processor.save_pretrained(
            self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)')
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='(BOS)' , eos_token='(EOS)' , )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab())
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
        seq_len = 3_5
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            'semantic_prompt': np.ones(seq_len),
            'coarse_prompt': np.ones((nb_codebooks_coarse, seq_len)),
            'fine_prompt': np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string , voice_preset=voice_preset)
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([])).tolist())
        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname , 'file.npz')
        np.savez(tmpfilename , **voice_preset)
        inputs = processor(text=self.input_string , voice_preset=tmpfilename)
        processed_voice_preset = inputs['history_prompt']
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(key , np.array([])).tolist())
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string , voice_preset=self.voice_preset)
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string , padding='max_length' , max_length=2_5_6 , add_special_tokens=False , return_attention_mask=True , return_token_type_ids=False , )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist())
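if __name__ == "__main__":
    # hedged usage sketch outside the test harness; downloading the checkpoint
    # used above from the Hub is assumed
    processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
    sketch_inputs = processor("This is a test string", voice_preset="en_speaker_1")
    print(sorted(sketch_inputs.keys()))  # expect attention_mask, history_prompt and input_ids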
| 345 | '''simple docstring'''
import torch
def main() -> None:
    """simple docstring"""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(F"""Successfully ran on {num_gpus} GPUs""" )
if __name__ == "__main__":
    main()
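def list_cuda_devices() -> None:
    # companion sketch (our helper, not part of the original script): name each
    # visible device instead of only counting them
    for index in range(torch.cuda.device_count()):
        print(F"""GPU {index}: {torch.cuda.get_device_name(index)}""")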
| 345 | 1 |
'''simple docstring'''
UpperCamelCase : Any = """0.18.2"""
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
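# Every try/except block above follows one guard pattern: probe for an optional
# backend and, when it is missing, export dummy placeholders that raise a helpful
# error on first use. A minimal sketch of that idea (illustrative names, not the
# real diffusers internals):
def _backend_available(module_name: str) -> bool:
    import importlib.util
    return importlib.util.find_spec(module_name) is not None
class _MissingBackendPlaceholder:
    def __init__(self, *args, **kwargs):
        raise ImportError('This object requires an optional backend that is not installed.')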
| 345 | '''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
_DESCRIPTION = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
_KWARGS_DESCRIPTION = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
"""simple docstring"""
    def _info ( self : Optional[int]):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
    def _compute ( self : str , predictions , references , p_features=None , q_features=None , p_tokens=None , q_tokens=None , num_buckets="auto" , pca_max_data=-1 , kmeans_explained_var=0.9 , kmeans_num_redo=5 , kmeans_max_iter=5_0_0 , featurize_model_name="gpt2-large" , device_id=-1 , max_text_length=1_0_2_4 , divergence_curve_discretization_size=2_5 , mauve_scaling_factor=5 , verbose=True , seed=2_5 , ):
        """simple docstring"""
        out = compute_mauve(
            p_text=predictions , q_text=references , p_features=p_features , q_features=q_features , p_tokens=p_tokens , q_tokens=q_tokens , num_buckets=num_buckets , pca_max_data=pca_max_data , kmeans_explained_var=kmeans_explained_var , kmeans_num_redo=kmeans_num_redo , kmeans_max_iter=kmeans_max_iter , featurize_model_name=featurize_model_name , device_id=device_id , max_text_length=max_text_length , divergence_curve_discretization_size=divergence_curve_discretization_size , mauve_scaling_factor=mauve_scaling_factor , verbose=verbose , seed=seed , )
return out
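# hedged sketch: compute_mauve can also consume precomputed feature matrices,
# which _compute forwards via p_features/q_features; shapes below are illustrative
if __name__ == "__main__":
    import numpy as np
    p_features = np.random.randn(1_0_0, 1_0_2_4)  # features of generated text
    q_features = np.random.randn(1_0_0, 1_0_2_4)  # features of human text
    print(compute_mauve(p_features=p_features, q_features=q_features).mauve)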
| 345 | 1 |
'''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig ( XLMRobertaConfig ):
    """simple docstring"""
    model_type = "M-CLIP"
    def __init__( self : Dict , transformerDimSize : int=1_0_2_4 , imageDimSize : int=7_6_8 , **UpperCAmelCase_ : Optional[Any]):
        """simple docstring"""
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**UpperCAmelCase_)
class MultilingualCLIP ( PreTrainedModel ):
    """simple docstring"""
    config_class = MCLIPConfig
    def __init__( self : Optional[Any] , config : MCLIPConfig , *args : Optional[int] , **kwargs : Optional[Any]):
        """simple docstring"""
        super().__init__(config , *args , **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions , out_features=config.numDims)
    def forward ( self : Union[str, Any] , input_ids : Optional[Any] , attention_mask : Any):
        """simple docstring"""
        embs = self.transformer(input_ids=input_ids , attention_mask=attention_mask)[0]
        embs_a = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs_a), embs
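if __name__ == "__main__":
    # hedged smoke-test sketch: random token ids through a freshly initialised
    # model; real use would load pretrained M-CLIP weights from the Hub instead.
    # transformerDimSize must match the backbone hidden size (768 by default here).
    config = MCLIPConfig(transformerDimSize=7_6_8, imageDimSize=5_1_2)
    model = MultilingualCLIP(config)
    input_ids = torch.randint(0, config.vocab_size, (1, 8))
    attention_mask = torch.ones_like(input_ids)
    txt_features, embs = model(input_ids, attention_mask)
    print(txt_features.shape, embs.shape)  # torch.Size([1, 512]) torch.Size([1, 8, 768])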
| 345 | '''simple docstring'''
from __future__ import annotations
def find_max ( nums : list[int | float] , left : int , right : int ) -> int | float:
    """simple docstring"""
    if len(nums ) == 0:
        raise ValueError('find_max() arg is an empty sequence' )
    if (
        left >= len(nums )
        or left < -len(nums )
        or right >= len(nums )
        or right < -len(nums )
    ):
        raise IndexError('list index out of range' )
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums , left , mid )  # find max in range[left, mid]
    right_max = find_max(nums , mid + 1 , right )  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
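if __name__ == "__main__":
    # usage sketch: search the whole list by passing the full index range
    nums = [1, 9, 4, 7, 2]
    assert find_max(nums, 0, len(nums) - 1) == max(nums)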
| 345 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_convbert""": ["""CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvBertConfig""", """ConvBertOnnxConfig"""],
"""tokenization_convbert""": ["""ConvBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_convbert_fast"""] = ["""ConvBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_convbert"""] = [
"""CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConvBertForMaskedLM""",
"""ConvBertForMultipleChoice""",
"""ConvBertForQuestionAnswering""",
"""ConvBertForSequenceClassification""",
"""ConvBertForTokenClassification""",
"""ConvBertLayer""",
"""ConvBertModel""",
"""ConvBertPreTrainedModel""",
"""load_tf_weights_in_convbert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_convbert"""] = [
"""TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFConvBertForMaskedLM""",
"""TFConvBertForMultipleChoice""",
"""TFConvBertForQuestionAnswering""",
"""TFConvBertForSequenceClassification""",
"""TFConvBertForTokenClassification""",
"""TFConvBertLayer""",
"""TFConvBertModel""",
"""TFConvBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
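# _LazyModule defers each heavy submodule import until an attribute is first
# accessed. A minimal sketch of the same idea with PEP 562's module-level
# __getattr__ (simplified; not the real transformers implementation):
#
#     import importlib
#     def __getattr__(name):
#         locations = {"ConvBertConfig": ".configuration_convbert"}
#         if name in locations:
#             return getattr(importlib.import_module(locations[name], __name__), name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")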
| 345 | '''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TOKENIZERS_PARALLELISM"] = """true"""
def get_basic_setup(accelerator : Accelerator , num_samples : int=82 , batch_size : int=16 ):
    """simple docstring"""
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=num_samples )
    dataloader = DataLoader(dset , batch_size=batch_size )
    model.to(accelerator.device )
    ddp_model , dataloader = accelerator.prepare(ddp_model , dataloader )
    return model, ddp_model, dataloader
def get_dataloader(accelerator : Accelerator , use_longest=False ):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
    dataset = load_dataset('glue' , 'mrpc' , split='validation' )
    def tokenize_function(examples ):
        outputs = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=True , max_length=None )
        return outputs
    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function , batched=True , remove_columns=['idx', 'sentence1', 'sentence2'] , )
    tokenized_datasets = tokenized_datasets.rename_column('label' , 'labels' )
    def collate_fn(examples ):
        if use_longest:
            return tokenizer.pad(examples , padding='longest' , return_tensors='pt' )
        return tokenizer.pad(examples , padding='max_length' , max_length=128 , return_tensors='pt' )
    return DataLoader(tokenized_datasets , shuffle=False , collate_fn=collate_fn , batch_size=16 )
def get_mrpc_setup(dispatch_batches , split_batches ):
    """simple docstring"""
    accelerator = Accelerator(dispatch_batches=dispatch_batches , split_batches=split_batches )
    dataloader = get_dataloader(accelerator , not dispatch_batches )
    model = AutoModelForSequenceClassification.from_pretrained(
        'hf-internal-testing/mrpc-bert-base-cased' , return_dict=True )
    ddp_model , ddp_dataloader = accelerator.prepare(model , dataloader )
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model , dataloader , accelerator ):
    """simple docstring"""
    logits_and_targets = []
    for batch in dataloader:
        input , target = batch.values()
        with torch.no_grad():
            logit = model(input )
        logit , target = accelerator.gather_for_metrics((logit, target) )
        logits_and_targets.append((logit, target) )
    logits , targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit )
        targs.append(targ )
    logits , targs = torch.cat(logits ), torch.cat(targs )
    return logits, targs
def test_torch_metrics(accelerator : Accelerator , num_samples=82 , dispatch_batches=False , split_batches=False , batch_size=16 ):
    """simple docstring"""
    model , ddp_model , dataloader = get_basic_setup(accelerator , num_samples , batch_size )
    logits , _ = generate_predictions(ddp_model , dataloader , accelerator )
    assert (
        len(logits ) == num_samples
    ), F"""Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits )}"""
def test_mrpc(dispatch_batches : bool = False , split_batches : bool = False ):
    """simple docstring"""
    metric = evaluate.load('glue' , 'mrpc' )
    setup , accelerator = get_mrpc_setup(dispatch_batches , split_batches )
    # First do baseline
    model , dataloader , device = setup['no']
    model.to(device )
    model.eval()
    for batch in dataloader:
        batch.to(device )
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        metric.add_batch(predictions=preds , references=batch['labels'] )
    baseline = metric.compute()
    # Then do distributed
    model , dataloader , device = setup['ddp']
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch )
        preds = outputs.logits.argmax(dim=-1 )
        references = batch['labels']
        preds , references = accelerator.gather_for_metrics((preds, references) )
        metric.add_batch(predictions=preds , references=references )
    distributed = metric.compute()
    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key] , distributed[key] ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def main() -> None:
    """simple docstring"""
    accelerator = Accelerator(split_batches=False , dispatch_batches=None )
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print('**Testing gather_for_metrics**' )
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
                test_mrpc(dispatch_batches , split_batches )
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test torch metrics**' )
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches , dispatch_batches=dispatch_batches )
            if accelerator.is_local_main_process:
                print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
            test_torch_metrics(accelerator , 99 )
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print('**Test last batch is not dropped when perfectly divisible**' )
    accelerator = Accelerator()
    test_torch_metrics(accelerator , 512 )
    accelerator.state._reset_state()
def _mp_fn(index ) -> None:
    """simple docstring"""
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
    main()
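# why gather_for_metrics instead of plain gather: with uneven shards the sampler
# pads the final batch, and a plain gather would keep the padded duplicates.
# Toy illustration with 2 processes and 99 samples (numbers are illustrative):
#   per-process shards: 50 and 50 (the second shard repeats 1 sample as padding)
#   accelerator.gather(...)             -> 100 predictions, 1 duplicate
#   accelerator.gather_for_metrics(...) ->  99 predictions, duplicate dropped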
| 345 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_whisper""": ["""WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WhisperConfig""", """WhisperOnnxConfig"""],
"""feature_extraction_whisper""": ["""WhisperFeatureExtractor"""],
"""processing_whisper""": ["""WhisperProcessor"""],
"""tokenization_whisper""": ["""WhisperTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_whisper_fast"""] = ["""WhisperTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_whisper"""] = [
"""WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WhisperForConditionalGeneration""",
"""WhisperModel""",
"""WhisperPreTrainedModel""",
"""WhisperForAudioClassification""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_whisper"""] = [
"""TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWhisperForConditionalGeneration""",
"""TFWhisperModel""",
"""TFWhisperPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_flax_whisper"""] = [
"""FlaxWhisperForConditionalGeneration""",
"""FlaxWhisperModel""",
"""FlaxWhisperPreTrainedModel""",
"""FlaxWhisperForAudioClassification""",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 345 | '''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class UpperCamelCase ( DiffusionPipeline ):
    """simple docstring"""
    _optional_components = ["vqvae"]
    def __init__( self : List[str] , vqvae : AutoencoderKL , unet : UNetaDConditionModel , mel : Mel , scheduler : Union[DDIMScheduler, DDPMScheduler] , ):
        """simple docstring"""
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler , mel=mel , vqvae=vqvae)
    def get_default_steps ( self : List[str]):
        """simple docstring"""
        return 5_0 if isinstance(self.scheduler , DDIMScheduler) else 1_0_0_0
@torch.no_grad()
    def __call__( self : Dict , batch_size : int = 1 , audio_file : str = None , raw_audio : np.ndarray = None , slice : int = 0 , start_step : int = 0 , steps : int = None , generator : torch.Generator = None , mask_start_secs : float = 0 , mask_end_secs : float = 0 , step_generator : torch.Generator = None , eta : float = 0 , noise : torch.Tensor = None , encoding : torch.Tensor = None , return_dict : bool = True , ):
        """simple docstring"""
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ) , generator=generator , device=self.device , )
        images = noise
        mask = None
        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file , raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes() , dtype='uint8').reshape(
                (input_image.height, input_image.width))
            input_image = (input_image / 2_5_5) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float).to(self.device)
            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images , 0)).latent_dist.sample(
                    generator=generator)[0]
                input_images = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images , noise , self.scheduler.timesteps[start_step - 1])
            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images , noise , torch.tensor(self.scheduler.timesteps[start_step:]))
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet , UNetaDConditionModel):
                model_output = self.unet(images , t , encoding)['sample']
            else:
                model_output = self.unet(images , t)['sample']
            if isinstance(self.scheduler , DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output , timestep=t , sample=images , eta=eta , generator=step_generator , )['prev_sample']
            else:
                images = self.scheduler.step(
                    model_output=model_output , timestep=t , sample=images , generator=step_generator , )['prev_sample']
            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)['sample']
        images = (images / 2 + 0.5).clamp(0 , 1)
        images = images.cpu().permute(0 , 2 , 3 , 1).numpy()
        images = (images * 2_5_5).round().astype('uint8')
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_ , mode='RGB').convert('L') for _ in images))
        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]) , **ImagePipelineOutput(images))
@torch.no_grad()
    def SCREAMING_SNAKE_CASE_ ( self : List[Any] , images : List[Image.Image] , steps : int = 5_0):
        """simple docstring"""
        assert isinstance(self.scheduler , DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes() , dtype='uint8').reshape((1, image.height, image.width)) for image in images])
        sample = (sample / 2_5_5) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample , t)['sample']
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample
@staticmethod
    def SCREAMING_SNAKE_CASE_ ( xa : torch.Tensor , xb : torch.Tensor , alpha : float):
        """simple docstring"""
        theta = acos(torch.dot(torch.flatten(xa) , torch.flatten(xb)) / torch.norm(xa) / torch.norm(xb))
        return sin((1 - alpha) * theta) * xa / sin(theta) + sin(alpha * theta) * xb / sin(theta)
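# hedged usage sketch for this pipeline (checkpoint name is an assumption and a
# Hub download is required; run from outside the package because of the relative
# imports above):
#
#     from diffusers import DiffusionPipeline
#     pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-256")
#     output = pipe(batch_size=1)
#     spectrogram = output.images[0]
#     sample_rate, audio = pipe.mel.get_sample_rate(), output.audios[0]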
| 345 | 1 |
'''simple docstring'''
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
MODEL_FOR_QUESTION_ANSWERING_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
LayoutLMvaForQuestionAnswering,
LayoutLMvaForSequenceClassification,
LayoutLMvaForTokenClassification,
LayoutLMvaModel,
)
from transformers.models.layoutlmva.modeling_layoutlmva import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaModelTester :
    """simple docstring"""
    def __init__( self : Optional[Any] , parent , batch_size=2 , num_channels=3 , image_size=4 , patch_size=2 , text_seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=9_9 , hidden_size=3_6 , num_hidden_layers=3 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=1_6 , type_sequence_label_size=2 , initializer_range=0.02 , coordinate_size=6 , shape_size=6 , num_labels=3 , num_choices=4 , scope=None , range_bbox=1_0_0_0 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox
        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length
    def prepare_config_and_inputs ( self : int):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size)
        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels)
        config = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    def create_and_check_model ( self : List[str] , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels):
        """simple docstring"""
        model = LayoutLMvaModel(config=config)
        model.to(torch_device)
        model.eval()
        # text + image
        result = model(input_ids , pixel_values=pixel_values)
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids)
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values , token_type_ids=token_type_ids)
        result = model(input_ids , bbox=bbox , pixel_values=pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size))
        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size))
    def create_and_check_for_sequence_classification ( self : List[Any] , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = LayoutLMvaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
    def create_and_check_for_token_classification ( self : List[Any] , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = LayoutLMvaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels))
    def create_and_check_for_question_answering ( self : str , config , input_ids , bbox , pixel_values , token_type_ids , input_mask , sequence_labels , token_labels):
        """simple docstring"""
        model = LayoutLMvaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids , bbox=bbox , pixel_values=pixel_values , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common ( self : List[str]):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            'input_ids': input_ids,
            'bbox': bbox,
            'pixel_values': pixel_values,
            'token_type_ids': token_type_ids,
            'attention_mask': input_mask,
        }
        return config, inputs_dict
@require_torch
class UpperCamelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False
    all_model_classes = (
        (
            LayoutLMvaModel,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMvaForQuestionAnswering, "feature-extraction": LayoutLMvaModel}
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip ( self : Any , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name):
        """simple docstring"""
        return True
    def setUp ( self : Any):
        """simple docstring"""
        self.model_tester = LayoutLMvaModelTester(self)
        self.config_tester = ConfigTester(self , config_class=LayoutLMvaConfig , hidden_size=3_7)
    def _prepare_for_class ( self : List[Any] , inputs_dict , model_class , return_labels=False):
        """simple docstring"""
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1 , self.model_tester.num_choices , -1).contiguous()
                if isinstance(v , torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }
        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict['labels'] = torch.ones(self.model_tester.batch_size , dtype=torch.long , device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict['start_positions'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device)
                inputs_dict['end_positions'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict['labels'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device)
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=torch.long , device=torch_device , )
        return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img ( ) -> Any:
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_image_processor ( self : List[str]):
        """simple docstring"""
        return LayoutLMvaImageProcessor(apply_ocr=False) if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
        model = LayoutLMvaModel.from_pretrained('microsoft/layoutlmv3-base').to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image , return_tensors='pt').pixel_values.to(torch_device)
        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)
        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device) , bbox=bbox.to(torch_device) , pixel_values=pixel_values.to(torch_device) , )
        # verify the logits
        expected_shape = torch.Size((1, 1_9_9, 7_6_8))
        self.assertEqual(outputs.last_hidden_state.shape , expected_shape)
        expected_slice = torch.tensor(
            [[-0.05_29, 0.36_18, 0.16_32], [-0.15_87, -0.16_67, -0.04_00], [-0.15_57, -0.16_71, -0.05_05]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1e-4))
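def normalize_bbox(bbox, width, height):
    # companion sketch (our helper, not part of this test file): LayoutLMv3 expects
    # word boxes on a 0-1000 grid, which is why the tester above draws random
    # bboxes with range_bbox=1000; this converts pixel boxes (x0, y0, x1, y1)
    # to that grid
    return [
        int(1_0_0_0 * bbox[0] / width),
        int(1_0_0_0 * bbox[1] / height),
        int(1_0_0_0 * bbox[2] / width),
        int(1_0_0_0 * bbox[3] / height),
    ]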
| 345 | '''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester :
"""simple docstring"""
    def __init__( self : str , parent , batch_size=1_3 , image_size=3_0 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.02 , scope=None , encoder_stride=2 , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs ( self : Union[str, Any]):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size)
        config = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : int = ViTModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : str = ViTForMaskedImageModeling(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
a : int = 1
a : Union[str, Any] = ViTForMaskedImageModeling(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : Optional[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any):
"""simple docstring"""
a : str = self.type_sequence_label_size
a : Tuple = ViTForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
a : List[Any] = 1
a : Union[str, Any] = ViTForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : Optional[int] = model(UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
        config , pixel_values , labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : str = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
A : Optional[Any] = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
A : List[str] = True
A : Optional[int] = False
A : Dict = False
A : Optional[int] = False
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : str = ViTModelTester(self)
a : Optional[Any] = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=3_7)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds')
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear))
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Dict = ViTModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
"""simple docstring"""
a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Optional[Any] = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(UpperCAmelCase_)
a : List[Any] = self.default_image_processor
a : List[str] = prepare_img()
a : Tuple = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
a : List[Any] = model(**UpperCAmelCase_)
# verify the logits
a : List[str] = torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
a : Union[str, Any] = torch.tensor([-0.27_44, 0.82_15, -0.08_36]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : List[str] = ViTModel.from_pretrained('facebook/dino-vits8').to(UpperCAmelCase_)
a : Any = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=4_8_0)
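        # 480x480 inputs are larger than the 224x224 pretraining resolution, so position embeddings must be interpolated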
a : int = prepare_img()
a : List[str] = image_processor(images=UpperCAmelCase_ , return_tensors='pt')
a : List[str] = inputs.pixel_values.to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
a : List[Any] = model(UpperCAmelCase_ , interpolate_pos_encoding=UpperCAmelCase_)
# verify the logits
a : Dict = torch.Size((1, 3_6_0_1, 3_8_4))
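        # 3601 = (480 / 8) ** 2 patches + 1 [CLS] token; 384 is the ViT-S/8 hidden size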
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase_)
a : str = torch.tensor(
[[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : str = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto')
a : List[Any] = self.default_image_processor
a : List[str] = prepare_img()
a : Tuple = image_processor(images=UpperCAmelCase_ , return_tensors='pt')
a : Tuple = inputs.pixel_values.to(UpperCAmelCase_)
# forward pass to make sure inference works in fp16
with torch.no_grad():
a : Tuple = model(UpperCAmelCase_)
| 345 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Optional[int] = logging.get_logger(__name__)
UpperCamelCase : Dict = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Dict = "markuplm"
def __init__( self : int , UpperCAmelCase_ : Dict=3_0_5_2_2 , UpperCAmelCase_ : Optional[Any]=7_6_8 , UpperCAmelCase_ : Tuple=1_2 , UpperCAmelCase_ : List[str]=1_2 , UpperCAmelCase_ : int=3_0_7_2 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Any=5_1_2 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Tuple=1e-12 , UpperCAmelCase_ : Tuple=0 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : List[Any]=2 , UpperCAmelCase_ : Any=2_5_6 , UpperCAmelCase_ : Optional[int]=1_0_2_4 , UpperCAmelCase_ : Optional[Any]=2_1_6 , UpperCAmelCase_ : Union[str, Any]=1_0_0_1 , UpperCAmelCase_ : List[Any]=3_2 , UpperCAmelCase_ : List[Any]=5_0 , UpperCAmelCase_ : str="absolute" , UpperCAmelCase_ : List[str]=True , UpperCAmelCase_ : List[str]=None , **UpperCAmelCase_ : Dict , ):
"""simple docstring"""
super().__init__(
pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , )
a : str = vocab_size
a : Tuple = hidden_size
a : Any = num_hidden_layers
a : int = num_attention_heads
a : Dict = hidden_act
a : Optional[int] = intermediate_size
a : Optional[Any] = hidden_dropout_prob
a : Dict = attention_probs_dropout_prob
a : Dict = max_position_embeddings
a : List[Any] = type_vocab_size
a : str = initializer_range
a : Dict = layer_norm_eps
a : Dict = position_embedding_type
a : Optional[int] = use_cache
a : Dict = classifier_dropout
# additional properties
a : Optional[Any] = max_depth
a : Optional[Any] = max_xpath_tag_unit_embeddings
a : Tuple = max_xpath_subs_unit_embeddings
a : int = tag_pad_id
a : Dict = subs_pad_id
a : List[str] = xpath_unit_hidden_size
| 345 | '''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : List[str] = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
A : List[Any] = "CIDAS/clipseg-rd64-refined"
A : Optional[Any] = "image_segmenter"
A : List[Any] = CLIPSegForImageSegmentation
A : Tuple = ["image", "text"]
A : Optional[int] = ["image"]
def __init__( self : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(self , ['vision'])
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : "Image" , UpperCAmelCase_ : str):
"""simple docstring"""
return self.pre_processor(text=[label] , images=[image] , padding=UpperCAmelCase_ , return_tensors='pt')
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : str):
"""simple docstring"""
with torch.no_grad():
a : Union[str, Any] = self.model(**UpperCAmelCase_).logits
return logits
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : int):
"""simple docstring"""
a : int = outputs.cpu().detach().numpy()
a : str = 0
a : str = 1
return Image.fromarray((array * 2_5_5).astype(np.uinta))
| 345 | 1 |
'''simple docstring'''
def is_isogram( string : str ) -> bool:
    """simple docstring"""
    if not all(x.isalpha() for x in string ):
        raise ValueError('String must only contain alphabetic characters.' )
    # an isogram has no repeated letters, regardless of case
    letters = sorted(string.lower() )
    return len(letters ) == len(set(letters ) )
if __name__ == "__main__":
    input_str = input("""Enter a string """).strip()
    isogram = is_isogram(input_str)
print(f'''{input_str} is {"an" if isogram else "not an"} isogram.''')
| 345 | '''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
A : Union[str, Any] = CTRLTokenizer
A : List[Any] = False
A : Optional[Any] = False
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
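        # a tiny vocabulary and merge table, just enough to tokenize "adapt react readapt apt"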
a : Dict = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
a : Tuple = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
a : Any = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
a : List[Any] = {'unk_token': '<unk>'}
a : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(UpperCAmelCase_) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , **UpperCAmelCase_ : Dict):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return CTRLTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Any):
"""simple docstring"""
        input_text = 'adapt react readapt apt'
        output_text = 'adapt react readapt apt'
        return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
        tokenizer = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
        text = 'adapt react readapt apt'
        bpe_tokens = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens , bpe_tokens)
        input_tokens = tokens + [tokenizer.unk_token]
        # expected ids follow the toy vocab defined in setUp
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens) , input_bpe_tokens)
| 345 | 1 |
'''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int]=1_3 , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Optional[Any]=9_9 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : int=3_2 , UpperCAmelCase_ : Tuple=5 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Optional[int]=5_1_2 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : int="last" , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : List[str]=0 , ):
"""simple docstring"""
a : Tuple = parent
a : Optional[Any] = batch_size
a : Tuple = seq_length
a : Union[str, Any] = is_training
a : List[str] = use_input_lengths
a : Union[str, Any] = use_token_type_ids
a : Optional[int] = use_labels
a : int = gelu_activation
a : Dict = sinusoidal_embeddings
a : Any = causal
a : Optional[int] = asm
a : int = n_langs
a : List[str] = vocab_size
a : List[str] = n_special
a : List[str] = hidden_size
a : Any = num_hidden_layers
a : Union[str, Any] = num_attention_heads
a : Optional[Any] = hidden_dropout_prob
a : str = attention_probs_dropout_prob
a : Dict = max_position_embeddings
a : Union[str, Any] = type_sequence_label_size
a : str = initializer_range
a : List[Any] = num_labels
a : Union[str, Any] = num_choices
a : Optional[Any] = summary_type
a : Optional[Any] = use_proj
a : Optional[Any] = scope
a : Dict = bos_token_id
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
a : Optional[int] = None
if self.use_input_lengths:
a : Optional[int] = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
a : int = None
if self.use_token_type_ids:
a : str = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
a : Optional[Any] = None
a : Tuple = None
a : Optional[Any] = None
if self.use_labels:
a : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a : Optional[Any] = ids_tensor([self.batch_size] , 2).float()
a : Dict = ids_tensor([self.batch_size] , self.num_choices)
a : Any = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : Any = XLMModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_ , lengths=UpperCAmelCase_ , langs=UpperCAmelCase_)
a : str = model(UpperCAmelCase_ , langs=UpperCAmelCase_)
a : int = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
a : Optional[Any] = XLMWithLMHeadModel(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : Union[str, Any] = XLMForQuestionAnsweringSimple(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = model(UpperCAmelCase_)
a : Tuple = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_)
a : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Any = XLMForQuestionAnswering(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_)
a : Dict = model(
UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , p_mask=UpperCAmelCase_ , )
a : int = model(
UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , )
((a) , ) : Union[str, Any] = result_with_labels.to_tuple()
a : int = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_)
((a) , ) : Union[str, Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , ):
"""simple docstring"""
a : Dict = XLMForSequenceClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_)
a : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Dict = self.num_labels
a : int = XLMForTokenClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , ):
"""simple docstring"""
a : str = self.num_choices
a : Dict = XLMForMultipleChoice(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : str = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Optional[int] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Any = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = self.prepare_config_and_inputs()
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
        return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : int = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
A : List[str] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
A : Optional[Any] = (
{
"feature-extraction": XLMModel,
"fill-mask": XLMWithLMHeadModel,
"question-answering": XLMForQuestionAnsweringSimple,
"text-classification": XLMForSequenceClassification,
"text-generation": XLMWithLMHeadModel,
"token-classification": XLMForTokenClassification,
"zero-shot": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict=False):
"""simple docstring"""
a : List[Any] = super()._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
a : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
a : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[Any] = XLMModelTester(self)
a : Tuple = ConfigTester(self , config_class=UpperCAmelCase_ , emb_dim=3_7)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=1):
"""simple docstring"""
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(
[isinstance(UpperCAmelCase_ , UpperCAmelCase_) for iter_attentions in attentions] , [True] * len(UpperCAmelCase_))
self.assertEqual(len(UpperCAmelCase_) , (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(UpperCAmelCase_):
# adds PAD dummy token
a : List[str] = min_length + idx + 1
a : Optional[Any] = min_length + idx + 1
a : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Union[str, Any]=1):
"""simple docstring"""
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(
[isinstance(UpperCAmelCase_ , UpperCAmelCase_) for iter_hidden_states in hidden_states] , [True] * len(UpperCAmelCase_) , )
self.assertEqual(len(UpperCAmelCase_) , (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(UpperCAmelCase_):
# adds PAD dummy token
a : int = min_length + idx + 1
a : Any = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(UpperCAmelCase_) , )
pass
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Union[str, Any] = XLMModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
@require_torch
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Dict = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
model.to(UpperCAmelCase_)
a : List[Any] = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=UpperCAmelCase_) # the president
a : Dict = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
a : Optional[Any] = model.generate(UpperCAmelCase_ , do_sample=UpperCAmelCase_)
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , UpperCAmelCase_)
| 345 | '''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( numa : int , numb : int ) -> bool:
    """simple docstring"""
    # two integers have opposite signs exactly when their xor is negative
    return numa ^ numb < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 345 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
UpperCamelCase : Optional[Any] = {
"""naver-clova-ix/donut-base""": """https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json""",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Dict = "donut-swin"
A : List[Any] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Tuple , UpperCAmelCase_ : Any=2_2_4 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : str=9_6 , UpperCAmelCase_ : Tuple=[2, 2, 6, 2] , UpperCAmelCase_ : List[Any]=[3, 6, 1_2, 2_4] , UpperCAmelCase_ : Any=7 , UpperCAmelCase_ : Dict=4.0 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : List[str]=0.0 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : Optional[Any]="gelu" , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : List[str]=1e-5 , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : Optional[Any] = image_size
a : Optional[int] = patch_size
a : Dict = num_channels
a : Optional[int] = embed_dim
a : List[str] = depths
a : List[str] = len(UpperCAmelCase_)
a : str = num_heads
a : Optional[Any] = window_size
a : int = mlp_ratio
a : Optional[Any] = qkv_bias
a : List[str] = hidden_dropout_prob
a : str = attention_probs_dropout_prob
a : int = drop_path_rate
a : Dict = hidden_act
a : Dict = use_absolute_embeddings
a : Optional[int] = layer_norm_eps
a : Union[str, Any] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
a : Tuple = int(embed_dim * 2 ** (len(UpperCAmelCase_) - 1))
| 345 | '''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase : int = logging.get_logger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Dict = ["pixel_values"]
def __init__( self : Optional[Any] , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : List[str] = size if size is not None else {'shortest_edge': 2_2_4}
a : str = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
a : int = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Any = do_resize
a : Dict = size
a : Optional[Any] = resample
a : List[Any] = do_center_crop
a : List[Any] = crop_size
a : Optional[Any] = do_rescale
a : Dict = rescale_factor
a : Tuple = do_normalize
a : int = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
a : Optional[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Optional[Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
a : int = int((2_5_6 / 2_2_4) * size['shortest_edge'])
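            # scale by the conventional 256/224 margin before resizing, leaving room for a later center crop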
a : Optional[int] = get_resize_output_image_size(UpperCAmelCase_ , size=UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : Optional[Any] = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""")
return resize(
UpperCAmelCase_ , size=(size_dict['height'], size_dict['width']) , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
a : str = get_size_dict(UpperCAmelCase_)
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""")
return center_crop(UpperCAmelCase_ , size=(size['height'], size['width']) , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[int, float] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , UpperCAmelCase_ : Optional[TensorType] = None , UpperCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : int = do_resize if do_resize is not None else self.do_resize
a : Optional[int] = resample if resample is not None else self.resample
a : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
a : Tuple = do_rescale if do_rescale is not None else self.do_rescale
a : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
a : Dict = do_normalize if do_normalize is not None else self.do_normalize
a : Tuple = image_mean if image_mean is not None else self.image_mean
a : int = image_std if image_std is not None else self.image_std
a : Optional[int] = size if size is not None else self.size
a : Optional[Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : List[Any] = crop_size if crop_size is not None else self.crop_size
a : str = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Dict = make_list_of_images(UpperCAmelCase_)
if not valid_images(UpperCAmelCase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a : Any = [to_numpy_array(UpperCAmelCase_) for image in images]
if do_resize:
a : Optional[int] = [self.resize(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_center_crop:
a : int = [self.center_crop(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_rescale:
a : Any = [self.rescale(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_normalize:
a : str = [self.normalize(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : Optional[int] = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : Optional[int] = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_)
| 345 | 1 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
UpperCamelCase : List[str] = logging.get_logger(__name__)
UpperCamelCase : List[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
UpperCamelCase : List[Any] = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
UpperCamelCase : Optional[int] = {
"""allenai/led-base-16384""": 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """simple docstring"""
    # build a reversible mapping from every byte value to a printable unicode character for byte-level BPE
    bs = (
        list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs(word ):
    """simple docstring"""
    # collect the set of adjacent symbol pairs in the given word
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : int = VOCAB_FILES_NAMES
A : Dict = PRETRAINED_VOCAB_FILES_MAP
A : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A : Optional[Any] = ["input_ids", "attention_mask"]
def __init__( self : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any]="replace" , UpperCAmelCase_ : Union[str, Any]="<s>" , UpperCAmelCase_ : Tuple="</s>" , UpperCAmelCase_ : Dict="</s>" , UpperCAmelCase_ : Tuple="<s>" , UpperCAmelCase_ : Optional[int]="<unk>" , UpperCAmelCase_ : Any="<pad>" , UpperCAmelCase_ : Union[str, Any]="<mask>" , UpperCAmelCase_ : int=False , **UpperCAmelCase_ : Dict , ):
"""simple docstring"""
a : List[str] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else bos_token
a : str = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else eos_token
a : Any = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else sep_token
a : Dict = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else cls_token
a : List[str] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else unk_token
a : Union[str, Any] = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
a : Any = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_) if isinstance(UpperCAmelCase_ , UpperCAmelCase_) else mask_token
super().__init__(
errors=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , add_prefix_space=UpperCAmelCase_ , **UpperCAmelCase_ , )
with open(UpperCAmelCase_ , encoding='utf-8') as vocab_handle:
a : str = json.load(UpperCAmelCase_)
a : int = {v: k for k, v in self.encoder.items()}
a : List[Any] = errors # how to handle errors in decoding
a : Union[str, Any] = bytes_to_unicode()
a : Tuple = {v: k for k, v in self.byte_encoder.items()}
with open(UpperCAmelCase_ , encoding='utf-8') as merges_handle:
a : List[Any] = merges_handle.read().split('\n')[1:-1]
a : Optional[Any] = [tuple(merge.split()) for merge in bpe_merges]
a : List[str] = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
a : Optional[int] = {}
a : Union[str, Any] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
a : str = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+')
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return len(self.encoder)
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder)
    def bpe( self : Optional[Any] , token : Tuple):
        """simple docstring"""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # merge the lowest-ranked pair first, as learned during BPE training
            bigram = min(pairs , key=lambda pair: self.bpe_ranks.get(pair , float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first , i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : List[str] = []
for token in re.findall(self.pat , UpperCAmelCase_):
a : int = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8')) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase_).split(' '))
return bpe_tokens
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
return self.encoder.get(UpperCAmelCase_ , self.encoder.get(self.unk_token))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
return self.decoder.get(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : int = ''.join(UpperCAmelCase_)
a : int = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8' , errors=self.errors)
return text
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None):
"""simple docstring"""
if not os.path.isdir(UpperCAmelCase_):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
return
a : Union[str, Any] = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
a : int = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'])
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_) + '\n')
a : Optional[Any] = 0
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as writer:
writer.write('#version: 0.2\n')
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase_: kv[1]):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!')
a : Tuple = token_index
writer.write(' '.join(UpperCAmelCase_) + '\n')
index += 1
return vocab_file, merge_file
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
a : List[Any] = [self.cls_token_id]
a : Optional[int] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None , UpperCAmelCase_ : bool = False):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_)
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase_)) + [1]
return [1] + ([0] * len(UpperCAmelCase_)) + [1, 1] + ([0] * len(UpperCAmelCase_)) + [1]
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : List[int] , UpperCAmelCase_ : Optional[List[int]] = None):
"""simple docstring"""
a : Union[str, Any] = [self.sep_token_id]
a : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any=False , **UpperCAmelCase_ : List[str]):
"""simple docstring"""
a : List[Any] = kwargs.pop('add_prefix_space' , self.add_prefix_space)
if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase_) > 0 and not text[0].isspace()):
a : Dict = ' ' + text
return (text, kwargs)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[bool] = None , ):
"""simple docstring"""
a : Union[str, Any] = super()._pad(
encoded_inputs=UpperCAmelCase_ , max_length=UpperCAmelCase_ , padding_strategy=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , )
# Load from model defaults
if return_attention_mask is None:
a : Dict = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
a : Any = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
a : Dict = len(encoded_inputs['global_attention_mask']) != len(UpperCAmelCase_)
if needs_to_be_padded:
a : Union[str, Any] = len(UpperCAmelCase_) - len(encoded_inputs['global_attention_mask'])
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
a : Optional[Any] = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
a : Dict = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side))
return encoded_inputs
| 345 | '''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : float | Decimal , snake_case : float = 10**-10 ) -> float:
"""simple docstring"""
a : Dict = a
while True:
a : Any = Decimal(snake_case ) - (
Decimal(eval(snake_case ) ) / Decimal(eval(str(diff(snake_case ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(snake_case ) ) < precision: # noqa: S307
return float(snake_case )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
# Find Square Root of 5
print(f'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
| 345 | 1 |
'''simple docstring'''
import math
def check_partition_perfect( positive_integer : int ) -> bool:
    """simple docstring"""
    # a partition is perfect exactly when this exponent is a whole number
    exponent = math.log2(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
    return exponent == int(exponent )
def solution( max_proportion : float = 1 / 12_345 ) -> int:
    """simple docstring"""
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate ):
            partition_candidate = int(partition_candidate )
            total_partitions += 1
            if check_partition_perfect(partition_candidate ):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate )
        integer += 1
if __name__ == "__main__":
print(f'''{solution() = }''')
| 345 | '''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int]=1_3 , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Optional[Any]=9_9 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : int=3_2 , UpperCAmelCase_ : Tuple=5 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Optional[int]=5_1_2 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : int="last" , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : List[str]=0 , ):
"""simple docstring"""
a : Tuple = parent
a : Optional[Any] = batch_size
a : Tuple = seq_length
a : Union[str, Any] = is_training
a : List[str] = use_input_lengths
a : Union[str, Any] = use_token_type_ids
a : Optional[int] = use_labels
a : int = gelu_activation
a : Dict = sinusoidal_embeddings
a : Any = causal
a : Optional[int] = asm
a : int = n_langs
a : List[str] = vocab_size
a : List[str] = n_special
a : List[str] = hidden_size
a : Any = num_hidden_layers
a : Union[str, Any] = num_attention_heads
a : Optional[Any] = hidden_dropout_prob
a : str = attention_probs_dropout_prob
a : Dict = max_position_embeddings
a : Union[str, Any] = type_sequence_label_size
a : str = initializer_range
a : List[Any] = num_labels
a : Union[str, Any] = num_choices
a : Optional[Any] = summary_type
a : Optional[Any] = use_proj
a : Optional[Any] = scope
a : Dict = bos_token_id
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
a : Optional[int] = None
if self.use_input_lengths:
a : Optional[int] = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
a : int = None
if self.use_token_type_ids:
a : str = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
a : Optional[Any] = None
a : Tuple = None
a : Optional[Any] = None
if self.use_labels:
a : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a : Optional[Any] = ids_tensor([self.batch_size] , 2).float()
a : Dict = ids_tensor([self.batch_size] , self.num_choices)
a : Any = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : Any = XLMModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_ , lengths=UpperCAmelCase_ , langs=UpperCAmelCase_)
a : str = model(UpperCAmelCase_ , langs=UpperCAmelCase_)
a : int = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
a : Optional[Any] = XLMWithLMHeadModel(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : Union[str, Any] = XLMForQuestionAnsweringSimple(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = model(UpperCAmelCase_)
a : Tuple = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_)
a : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Any = XLMForQuestionAnswering(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_)
a : Dict = model(
UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , p_mask=UpperCAmelCase_ , )
a : int = model(
UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , )
((a) , ) : Union[str, Any] = result_with_labels.to_tuple()
a : int = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_)
((a) , ) : Union[str, Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , ):
"""simple docstring"""
a : Dict = XLMForSequenceClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_)
a : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Dict = self.num_labels
a : int = XLMForTokenClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , ):
"""simple docstring"""
a : str = self.num_choices
a : Dict = XLMForMultipleChoice(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : str = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Optional[int] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Any = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : int = self.prepare_config_and_inputs()
(
(
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) , (
a
) ,
) : Union[str, Any] = config_and_inputs
a : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : int = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
A : List[str] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
A : Optional[Any] = (
{
"feature-extraction": XLMModel,
"fill-mask": XLMWithLMHeadModel,
"question-answering": XLMForQuestionAnsweringSimple,
"text-classification": XLMForSequenceClassification,
"text-generation": XLMWithLMHeadModel,
"token-classification": XLMForTokenClassification,
"zero-shot": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict=False):
"""simple docstring"""
a : List[Any] = super()._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
a : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
a : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[Any] = XLMModelTester(self)
a : Tuple = ConfigTester(self , config_class=UpperCAmelCase_ , emb_dim=3_7)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=1):
"""simple docstring"""
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(
[isinstance(UpperCAmelCase_ , UpperCAmelCase_) for iter_attentions in attentions] , [True] * len(UpperCAmelCase_))
self.assertEqual(len(UpperCAmelCase_) , (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(UpperCAmelCase_):
# adds PAD dummy token
a : List[str] = min_length + idx + 1
a : Optional[Any] = min_length + idx + 1
a : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Union[str, Any]=1):
"""simple docstring"""
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(
[isinstance(UpperCAmelCase_ , UpperCAmelCase_) for iter_hidden_states in hidden_states] , [True] * len(UpperCAmelCase_) , )
self.assertEqual(len(UpperCAmelCase_) , (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(UpperCAmelCase_):
# adds PAD dummy token
a : int = min_length + idx + 1
a : Any = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(UpperCAmelCase_) , )
pass
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Union[str, Any] = XLMModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
@require_torch
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Dict = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
model.to(UpperCAmelCase_)
a : List[Any] = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=UpperCAmelCase_) # the president
a : Dict = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
a : Optional[Any] = model.generate(UpperCAmelCase_ , do_sample=UpperCAmelCase_)
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , UpperCAmelCase_)
| 345 | 1 |
'''simple docstring'''
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)
MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / """model_card_template.md"""
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/"""
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[Dict, str, None] = None ) -> str:
"""simple docstring"""
a : Any = F"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"""
if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
return ua + "; telemetry/off"
if is_torch_available():
ua += F"""; torch/{_torch_version}"""
if is_flax_available():
ua += F"""; jax/{_jax_version}"""
ua += F"""; flax/{_flax_version}"""
if is_onnx_available():
ua += F"""; onnxruntime/{_onnxruntime_version}"""
# CI will set this value to True
if os.environ.get('DIFFUSERS_IS_CI' , '' ).upper() in ENV_VARS_TRUE_VALUES:
ua += "; is_ci/true"
if isinstance(snake_case , snake_case ):
ua += "; " + "; ".join(F"""{k}/{v}""" for k, v in user_agent.items() )
elif isinstance(snake_case , snake_case ):
ua += "; " + user_agent
return ua
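    # Illustrative only (version numbers are hypothetical): the returned string looks like
    #   "diffusers/0.16.0; python/3.10.4; session_id/<hex>; torch/2.0.0; is_ci/true",
    # while an early return of just the base string plus "; telemetry/off" happens when
    # HF_HUB_OFFLINE or DISABLE_TELEMETRY is set.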
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None , snake_case : Optional[str] = None ) -> Optional[int]:
"""simple docstring"""
if token is None:
a : Union[str, Any] = HfFolder.get_token()
if organization is None:
a : List[str] = whoami(snake_case )['name']
return F"""{username}/{model_id}"""
else:
return F"""{organization}/{model_id}"""
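    # Example (hypothetical names): get_full_repo_name('my-model') -> '<hub-username>/my-model',
    # while get_full_repo_name('my-model', organization='my-org') -> 'my-org/my-model'.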
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
if not is_jinja_available():
raise ValueError(
'Modelcard rendering is based on Jinja templates.'
' Please make sure to have `jinja` installed before using `create_model_card`.'
' To install it, please run `pip install Jinja2`.' )
if hasattr(snake_case , 'local_rank' ) and args.local_rank not in [-1, 0]:
return
a : Dict = args.hub_token if hasattr(snake_case , 'hub_token' ) else None
a : Dict = get_full_repo_name(snake_case , token=snake_case )
a : str = ModelCard.from_template(
card_data=ModelCardData( # Card metadata object that will be converted to YAML block
language='en' , license='apache-2.0' , library_name='diffusers' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=snake_case , model_name=snake_case , repo_name=snake_case , dataset_name=args.dataset_name if hasattr(snake_case , 'dataset_name' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=(
args.gradient_accumulation_steps if hasattr(snake_case , 'gradient_accumulation_steps' ) else None
) , adam_betaa=args.adam_betaa if hasattr(snake_case , 'adam_beta1' ) else None , adam_betaa=args.adam_betaa if hasattr(snake_case , 'adam_beta2' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(snake_case , 'adam_weight_decay' ) else None , adam_epsilon=args.adam_epsilon if hasattr(snake_case , 'adam_epsilon' ) else None , lr_scheduler=args.lr_scheduler if hasattr(snake_case , 'lr_scheduler' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(snake_case , 'lr_warmup_steps' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(snake_case , 'ema_inv_gamma' ) else None , ema_power=args.ema_power if hasattr(snake_case , 'ema_power' ) else None , ema_max_decay=args.ema_max_decay if hasattr(snake_case , 'ema_max_decay' ) else None , mixed_precision=args.mixed_precision , )
a : Tuple = os.path.join(args.output_dir , 'README.md' )
model_card.save(snake_case )
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] , snake_case : Optional[str] = None ) -> str:
"""simple docstring"""
if resolved_file is None or commit_hash is not None:
return commit_hash
a : Optional[int] = str(Path(snake_case ).as_posix() )
a : Tuple = re.search(R'snapshots/([^/]+)/' , snake_case )
if search is None:
return None
a : Tuple = search.groups()[0]
return commit_hash if REGEX_COMMIT_HASH.match(snake_case ) else None
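    # Example (hypothetical cache path): for a resolved file such as
    #   .../models--some-org--some-model/snapshots/<40-char-sha>/unet/config.json
    # the regex extracts '<40-char-sha>', which is returned only if it matches REGEX_COMMIT_HASH.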
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface"""))
)
old_diffusers_cache = os.path.join(hf_cache_home, """diffusers""")
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] = None , snake_case : Optional[str] = None ) -> None:
"""simple docstring"""
if new_cache_dir is None:
a : Optional[Any] = DIFFUSERS_CACHE
if old_cache_dir is None:
a : Any = old_diffusers_cache
a : Dict = Path(snake_case ).expanduser()
a : str = Path(snake_case ).expanduser()
for old_blob_path in old_cache_dir.glob('**/blobs/*' ):
if old_blob_path.is_file() and not old_blob_path.is_symlink():
a : List[Any] = new_cache_dir / old_blob_path.relative_to(snake_case )
new_blob_path.parent.mkdir(parents=snake_case , exist_ok=snake_case )
os.replace(snake_case , snake_case )
try:
os.symlink(snake_case , snake_case )
except OSError:
logger.warning(
'Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.' )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0
if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
"""The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """
"""existing cached models. This is a one-time operation, you can interrupt it or run it """
"""later by calling `diffusers.utils.hub_utils.move_cache()`."""
)
try:
move_cache()
except Exception as e:
UpperCamelCase : Optional[int] = """\n""".join(traceback.format_tb(e.__traceback__))
logger.error(
f'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease '''
"""file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """
"""message and we will do our best to help."""
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, """w""") as f:
f.write("""1""")
except Exception:
logger.warning(
f'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure '''
"""the directory exists and can be written to."""
)
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None ) -> str:
"""simple docstring"""
if variant is not None:
a : Optional[int] = weights_name.split('.' )
a : Dict = splits[:-1] + [variant] + splits[-1:]
a : Any = '.'.join(snake_case )
return weights_name
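    # Example (assuming this helper is the `_add_variant` used below): with variant='fp16',
    # 'diffusion_pytorch_model.bin' becomes 'diffusion_pytorch_model.fp16.bin'; with
    # variant=None the name is returned unchanged.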
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , *,
snake_case : Optional[Any] , snake_case : str , snake_case : int , snake_case : List[str] , snake_case : Optional[Any] , snake_case : List[str] , snake_case : Any , snake_case : Union[str, Any] , snake_case : str , snake_case : List[str] , snake_case : Optional[int]=None , ) -> Optional[int]:
"""simple docstring"""
a : Optional[int] = str(snake_case )
if os.path.isfile(snake_case ):
return pretrained_model_name_or_path
elif os.path.isdir(snake_case ):
if os.path.isfile(os.path.join(snake_case , snake_case ) ):
# Load from a PyTorch checkpoint
a : Optional[int] = os.path.join(snake_case , snake_case )
return model_file
elif subfolder is not None and os.path.isfile(
os.path.join(snake_case , snake_case , snake_case ) ):
a : List[Any] = os.path.join(snake_case , snake_case , snake_case )
return model_file
else:
raise EnvironmentError(
F"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
else:
# 1. First check if deprecated way of loading from branches is used
if (
revision in DEPRECATED_REVISION_ARGS
and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
and version.parse(version.parse(snake_case ).base_version ) >= version.parse('0.20.0' )
):
try:
a : Tuple = hf_hub_download(
snake_case , filename=_add_variant(snake_case , snake_case ) , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , )
warnings.warn(
F"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , snake_case , )
return model_file
except: # noqa: E722
warnings.warn(
F"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(snake_case , snake_case )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(snake_case , snake_case )}' so that the correct variant file can be added.""" , snake_case , )
try:
# 2. Load model file as usual
a : Any = hf_hub_download(
snake_case , filename=snake_case , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , )
return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
'listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '
'token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '
'login`.' )
except RevisionNotFoundError:
raise EnvironmentError(
F"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
'this model name. Check the model page at '
F"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
except EntryNotFoundError:
raise EnvironmentError(
F"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
except HTTPError as err:
raise EnvironmentError(
F"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
except ValueError:
raise EnvironmentError(
F"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
F""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
F""" directory containing a file named {weights_name} or"""
' \nCheckout your internet connection or see how to run the library in'
' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.' )
except EnvironmentError:
raise EnvironmentError(
F"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
'\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '
F"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
F"""containing a file named {weights_name}""" )
| 345 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase : List[str] = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Any = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[int] = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 345 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
UpperCamelCase : List[Any] = logging.get_logger(__name__)
UpperCamelCase : Union[str, Any] = {
"""microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Any = "layoutlmv3"
def __init__( self : Union[str, Any] , UpperCAmelCase_ : str=5_0_2_6_5 , UpperCAmelCase_ : List[str]=7_6_8 , UpperCAmelCase_ : Optional[int]=1_2 , UpperCAmelCase_ : Optional[Any]=1_2 , UpperCAmelCase_ : Optional[int]=3_0_7_2 , UpperCAmelCase_ : Tuple="gelu" , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : Optional[int]=5_1_2 , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : Dict=0.02 , UpperCAmelCase_ : str=1e-5 , UpperCAmelCase_ : str=1 , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : Optional[int]=2 , UpperCAmelCase_ : Optional[Any]=1_0_2_4 , UpperCAmelCase_ : str=1_2_8 , UpperCAmelCase_ : Optional[int]=1_2_8 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : str=3_2 , UpperCAmelCase_ : Optional[int]=1_2_8 , UpperCAmelCase_ : int=6_4 , UpperCAmelCase_ : Dict=2_5_6 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Optional[int]=2_2_4 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : str=1_6 , UpperCAmelCase_ : List[str]=None , **UpperCAmelCase_ : int , ):
"""simple docstring"""
super().__init__(
vocab_size=UpperCAmelCase_ , hidden_size=UpperCAmelCase_ , num_hidden_layers=UpperCAmelCase_ , num_attention_heads=UpperCAmelCase_ , intermediate_size=UpperCAmelCase_ , hidden_act=UpperCAmelCase_ , hidden_dropout_prob=UpperCAmelCase_ , attention_probs_dropout_prob=UpperCAmelCase_ , max_position_embeddings=UpperCAmelCase_ , type_vocab_size=UpperCAmelCase_ , initializer_range=UpperCAmelCase_ , layer_norm_eps=UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ , )
a : List[Any] = max_ad_position_embeddings
a : Optional[int] = coordinate_size
a : Tuple = shape_size
a : Any = has_relative_attention_bias
a : Optional[Any] = rel_pos_bins
a : List[Any] = max_rel_pos
a : int = has_spatial_attention_bias
a : Union[str, Any] = rel_ad_pos_bins
a : Any = max_rel_ad_pos
a : Dict = text_embed
a : List[Any] = visual_embed
a : List[str] = input_size
a : int = num_channels
a : List[Any] = patch_size
a : Optional[int] = classifier_dropout
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : int = version.parse("1.12" )
@property
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
if self.task in ["question-answering", "sequence-classification"]:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
])
else:
return OrderedDict(
[
('input_ids', {0: 'batch', 1: 'sequence'}),
('bbox', {0: 'batch', 1: 'sequence'}),
('attention_mask', {0: 'batch', 1: 'sequence'}),
('pixel_values', {0: 'batch', 1: 'num_channels'}),
])
@property
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
return 1e-5
@property
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
return 1_2
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : "ProcessorMixin" , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : int = -1 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional["TensorType"] = None , UpperCAmelCase_ : int = 3 , UpperCAmelCase_ : int = 4_0 , UpperCAmelCase_ : int = 4_0 , ):
"""simple docstring"""
setattr(processor.image_processor , 'apply_ocr' , UpperCAmelCase_)
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
a : List[str] = compute_effective_axis_dimension(
UpperCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0)
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
a : Optional[Any] = processor.tokenizer.num_special_tokens_to_add(UpperCAmelCase_)
a : Dict = compute_effective_axis_dimension(
UpperCAmelCase_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=UpperCAmelCase_)
# Generate dummy inputs according to compute batch and sequence
a : Dict = [[' '.join([processor.tokenizer.unk_token]) * seq_length]] * batch_size
# Generate dummy bounding boxes
a : int = [[[4_8, 8_4, 7_3, 1_2_8]]] * batch_size
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
# batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
a : Tuple = self._generate_dummy_images(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
a : int = dict(
processor(
UpperCAmelCase_ , text=UpperCAmelCase_ , boxes=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , ))
return inputs
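        # The returned dict mirrors the ONNX input spec declared above: input_ids, bbox,
        # attention_mask and pixel_values, all built from the fixed dummy batch/sequence
        # sizes computed in this method.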
| 345 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase : Tuple = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Union[str, Any] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
UpperCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 345 | 1 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
UpperCamelCase : Optional[int] = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
A : Any = MBartaaTokenizer
A : List[Any] = MBartaaTokenizerFast
A : str = True
A : List[str] = True
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
a : Optional[Any] = MBartaaTokenizer(UpperCAmelCase_ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=UpperCAmelCase_)
tokenizer.save_pretrained(self.tmpdirname)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Optional[int] = '<s>'
a : int = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_) , UpperCAmelCase_)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_) , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : Union[str, Any] = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>')
self.assertEqual(vocab_keys[1] , '<pad>')
self.assertEqual(vocab_keys[-1] , '<mask>')
self.assertEqual(len(UpperCAmelCase_) , 1_0_5_4)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1_0_5_4)
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Tuple = MBartaaTokenizer(UpperCAmelCase_ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=UpperCAmelCase_)
a : Any = tokenizer.tokenize('This is a test')
self.assertListEqual(UpperCAmelCase_ , ['▁This', '▁is', '▁a', '▁t', 'est'])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
a : List[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.')
self.assertListEqual(
UpperCAmelCase_ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
a : Optional[Any] = tokenizer.convert_tokens_to_ids(UpperCAmelCase_)
self.assertListEqual(
UpperCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
a : List[Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase_)
self.assertListEqual(
UpperCAmelCase_ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = {'input_ids': [[2_5_0_0_0_4, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [2_5_0_0_0_4, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_0_0_0_4, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
a : Union[str, Any] = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
a : int = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_)
a : int = self.tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_)
a : str = tempfile.mkdtemp()
a : Any = tokenizer_r.save_pretrained(UpperCAmelCase_)
a : Union[str, Any] = tokenizer_p.save_pretrained(UpperCAmelCase_)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
a : str = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f)
self.assertSequenceEqual(UpperCAmelCase_ , UpperCAmelCase_)
# Checks everything loads correctly in the same way
a : Any = tokenizer_r.from_pretrained(UpperCAmelCase_)
a : str = tokenizer_p.from_pretrained(UpperCAmelCase_)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_))
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(UpperCAmelCase_)
# Save tokenizer rust, legacy_format=True
a : List[Any] = tempfile.mkdtemp()
a : Optional[Any] = tokenizer_r.save_pretrained(UpperCAmelCase_ , legacy_format=UpperCAmelCase_)
a : List[Any] = tokenizer_p.save_pretrained(UpperCAmelCase_)
# Checks it save with the same files
self.assertSequenceEqual(UpperCAmelCase_ , UpperCAmelCase_)
# Checks everything loads correctly in the same way
a : int = tokenizer_r.from_pretrained(UpperCAmelCase_)
a : List[str] = tokenizer_p.from_pretrained(UpperCAmelCase_)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_))
shutil.rmtree(UpperCAmelCase_)
# Save tokenizer rust, legacy_format=False
a : Dict = tempfile.mkdtemp()
a : str = tokenizer_r.save_pretrained(UpperCAmelCase_ , legacy_format=UpperCAmelCase_)
a : List[str] = tokenizer_p.save_pretrained(UpperCAmelCase_)
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
a : Union[str, Any] = tokenizer_r.from_pretrained(UpperCAmelCase_)
a : Optional[Any] = tokenizer_p.from_pretrained(UpperCAmelCase_)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_))
shutil.rmtree(UpperCAmelCase_)
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
A : Optional[int] = "facebook/mbart-large-50-one-to-many-mmt"
A : Dict = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
A : List[Any] = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
A : int = [EN_CODE, 8274, 12_7873, 2_5916, 7, 8622, 2071, 438, 6_7485, 53, 18_7895, 23, 5_1712, 2]
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : Any):
"""simple docstring"""
a : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO')
a : str = 1
return cls
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 2_5_0_0_0_1)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 2_5_0_0_0_4)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 2_5_0_0_2_0)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 2_5_0_0_3_8)
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : Dict = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
self.assertIn(UpperCAmelCase_ , self.tokenizer.all_special_ids)
a : Dict = [RO_CODE, 8_8_4, 9_0_1_9, 9_6, 9, 9_1_6, 8_6_7_9_2, 3_6, 1_8_7_4_3, 1_5_5_9_6, 5, 2]
a : Any = self.tokenizer.decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_)
a : int = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase_)
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_)
self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : str = ['this is gunna be a long sentence ' * 2_0]
assert isinstance(src_text[0] , UpperCAmelCase_)
a : Dict = 1_0
a : Optional[int] = self.tokenizer(UpperCAmelCase_ , max_length=UpperCAmelCase_ , truncation=UpperCAmelCase_).input_ids[0]
self.assertEqual(ids[0] , UpperCAmelCase_)
self.assertEqual(ids[-1] , 2)
self.assertEqual(len(UpperCAmelCase_) , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR']) , [2_5_0_0_5_3, 2_5_0_0_0_1])
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : Any = tempfile.mkdtemp()
a : int = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCAmelCase_)
a : Tuple = MBartaaTokenizer.from_pretrained(UpperCAmelCase_)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCAmelCase_)
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Union[str, Any] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase_ , return_tensors='pt')
a : List[Any] = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id)
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : List[Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=len(self.expected_src_tokens) , return_tensors='pt' , )
a : Dict = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertEqual((2, 1_4) , batch.input_ids.shape)
self.assertEqual((2, 1_4) , batch.attention_mask.shape)
a : str = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase_)
self.assertEqual(2 , batch.decoder_input_ids[0, 0]) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE])
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Dict = self.tokenizer(self.src_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=3 , return_tensors='pt')
a : Tuple = self.tokenizer(
text_target=self.tgt_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=1_0 , return_tensors='pt')
a : Tuple = targets['input_ids']
a : List[Any] = shift_tokens_right(UpperCAmelCase_ , self.tokenizer.pad_token_id)
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0)
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : Dict = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR')
self.assertEqual(
nested_simplify(UpperCAmelCase_) , {
# en_XX, A, test, EOS
'input_ids': [[2_5_0_0_0_4, 6_2, 3_0_3_4, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 2_5_0_0_0_1,
} , )
| 345 | '''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : str = logging.get_logger(__name__)
UpperCamelCase : List[str] = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Optional[int] = "data2vec-audio"
def __init__( self : Dict , UpperCAmelCase_ : Optional[int]=3_2 , UpperCAmelCase_ : Union[str, Any]=7_6_8 , UpperCAmelCase_ : Dict=1_2 , UpperCAmelCase_ : str=1_2 , UpperCAmelCase_ : Optional[Any]=3_0_7_2 , UpperCAmelCase_ : Dict="gelu" , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : str=1e-5 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Optional[Any]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCAmelCase_ : Dict=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_ : int=(1_0, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : List[Any]=1_6 , UpperCAmelCase_ : Optional[Any]=1_9 , UpperCAmelCase_ : int=5 , UpperCAmelCase_ : Any=0.05 , UpperCAmelCase_ : Dict=1_0 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : Tuple=1_0 , UpperCAmelCase_ : int=0 , UpperCAmelCase_ : Any="sum" , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[int]=2_5_6 , UpperCAmelCase_ : Any=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCAmelCase_ : Optional[Any]=(5, 3, 3, 1, 1) , UpperCAmelCase_ : Optional[int]=(1, 2, 3, 1, 1) , UpperCAmelCase_ : int=5_1_2 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : Tuple=1 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : List[Any]=None , **UpperCAmelCase_ : int , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_)
a : List[Any] = hidden_size
a : Any = feat_extract_activation
a : Any = list(UpperCAmelCase_)
a : Optional[int] = list(UpperCAmelCase_)
a : Dict = list(UpperCAmelCase_)
a : Tuple = conv_bias
a : str = num_conv_pos_embeddings
a : Dict = num_conv_pos_embedding_groups
a : Optional[Any] = conv_pos_kernel_size
a : Any = len(self.conv_dim)
a : Tuple = num_hidden_layers
a : Any = intermediate_size
a : Any = hidden_act
a : Dict = num_attention_heads
a : Dict = hidden_dropout
a : Union[str, Any] = attention_dropout
a : Dict = activation_dropout
a : Optional[int] = feat_proj_dropout
a : Tuple = final_dropout
a : Union[str, Any] = layerdrop
a : Tuple = layer_norm_eps
a : Dict = initializer_range
a : Tuple = vocab_size
a : int = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a : List[str] = mask_time_prob
a : int = mask_time_length
a : Optional[int] = mask_time_min_masks
a : Dict = mask_feature_prob
a : List[str] = mask_feature_length
a : str = mask_feature_min_masks
# ctc loss
a : str = ctc_loss_reduction
a : Optional[Any] = ctc_zero_infinity
# adapter
a : List[str] = add_adapter
a : Optional[Any] = adapter_kernel_size
a : int = adapter_stride
a : str = num_adapter_layers
a : Optional[Any] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
a : str = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
a : List[Any] = list(UpperCAmelCase_)
a : List[str] = list(UpperCAmelCase_)
a : str = list(UpperCAmelCase_)
a : Optional[Any] = xvector_output_dim
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
return math.prod(self.conv_stride)
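        # With the default strides (5, 2, 2, 2, 2, 2, 2) this product is 320: the overall
        # downsampling factor of the feature encoder, i.e. how many raw audio samples are
        # collapsed into a single encoder frame.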
| 345 | 1 |
'''simple docstring'''
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray
def SCREAMING_SNAKE_CASE__ ( snake_case : float , snake_case : float , snake_case : bool = False ) -> list[float]:
"""simple docstring"""
if radian_mode:
return [magnitude * cos(snake_case ), magnitude * sin(snake_case )]
return [magnitude * cos(radians(snake_case ) ), magnitude * sin(radians(snake_case ) )]
def SCREAMING_SNAKE_CASE__ ( snake_case : NDArray[floataa] , snake_case : NDArray[floataa] , snake_case : float = 10**-1 ) -> bool:
"""simple docstring"""
a : NDArray[floataa] = cross(snake_case , snake_case )
a : float = sum(snake_case )
return abs(snake_case ) < eps
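# Hedged worked example (added; the variable name is illustrative only): a single 10 N force
# acting straight up at a point 2 m to the right of the reference point produces a moment of
# r x F = 2 * 10 = 20 N*m, which is far from zero, so such a system is not in static
# equilibrium under the tolerance used above.
_example_moment = cross(array([2.0, 0.0]), array([0.0, 10.0]))  # scalar z-component, 20.0
assert abs(float(_example_moment)) > 10**-1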
if __name__ == "__main__":
# Test to check if it works
UpperCamelCase : List[Any] = array(
[
polar_force(7_18.4, 180 - 30),
polar_force(8_79.54, 45),
polar_force(100, -90),
]
)
UpperCamelCase : NDArray[floataa] = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
UpperCamelCase : Any = array(
[
polar_force(30 * 9.81, 15),
polar_force(215, 180 - 45),
polar_force(264, 90 - 30),
]
)
UpperCamelCase : int = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
UpperCamelCase : Optional[Any] = array([[0, -2_000], [0, -1_200], [0, 15_600], [0, -12_400]])
UpperCamelCase : int = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 345 | '''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
UpperCamelCase : Optional[Any] = logging.getLogger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Tuple = "masked_bert"
def __init__( self : Tuple , UpperCAmelCase_ : List[Any]=3_0_5_2_2 , UpperCAmelCase_ : str=7_6_8 , UpperCAmelCase_ : Optional[Any]=1_2 , UpperCAmelCase_ : Optional[int]=1_2 , UpperCAmelCase_ : Union[str, Any]=3_0_7_2 , UpperCAmelCase_ : Union[str, Any]="gelu" , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Optional[int]=5_1_2 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : Optional[Any]=1e-12 , UpperCAmelCase_ : Dict=0 , UpperCAmelCase_ : Dict="topK" , UpperCAmelCase_ : str="constant" , UpperCAmelCase_ : Optional[Any]=0.0 , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
a : Union[str, Any] = vocab_size
a : List[Any] = hidden_size
a : List[str] = num_hidden_layers
a : Any = num_attention_heads
a : Optional[Any] = hidden_act
a : str = intermediate_size
a : Dict = hidden_dropout_prob
a : Any = attention_probs_dropout_prob
a : Any = max_position_embeddings
a : Dict = type_vocab_size
a : List[str] = initializer_range
a : int = layer_norm_eps
a : Dict = pruning_method
a : List[str] = mask_init
a : Union[str, Any] = mask_scale
| 345 | 1 |
'''simple docstring'''
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
UpperCamelCase : str = logging.get_logger(__name__)
UpperCamelCase : Dict = {"""vocab_file""": """vocab.json"""}
UpperCamelCase : Union[str, Any] = {
"""vocab_file""": {
"""mgp-str""": """https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json""",
}
}
UpperCamelCase : Any = {"""mgp-str""": 27}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Optional[int] = VOCAB_FILES_NAMES
A : Dict = PRETRAINED_VOCAB_FILES_MAP
A : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Any , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int="[GO]" , UpperCAmelCase_ : int="[GO]" , UpperCAmelCase_ : Optional[int]="[s]" , UpperCAmelCase_ : Dict="[GO]" , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
super().__init__(
unk_token=UpperCAmelCase_ , bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , **UpperCAmelCase_ , )
with open(UpperCAmelCase_ , encoding='utf-8') as vocab_handle:
a : Optional[int] = json.load(UpperCAmelCase_)
a : Any = {v: k for k, v in self.vocab.items()}
@property
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
return len(self.vocab)
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
a : List[str] = []
for s in text:
char_tokens.extend(UpperCAmelCase_)
return char_tokens
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
return self.vocab.get(UpperCAmelCase_ , self.vocab.get(self.unk_token))
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
return self.decoder.get(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[str] = None):
"""simple docstring"""
if not os.path.isdir(UpperCAmelCase_):
logger.error('Vocabulary path ({}) should be a directory'.format(UpperCAmelCase_))
return
a : Optional[int] = os.path.join(
UpperCAmelCase_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
with open(UpperCAmelCase_ , 'w' , encoding='utf-8') as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=UpperCAmelCase_ , ensure_ascii=UpperCAmelCase_) + '\n')
return (vocab_file,)
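# Illustrative note (added): tokenization in this class is purely character level, e.g. the
# string "abc" is split into the tokens ["a", "b", "c"], and each character is then looked up
# in the vocabulary, with unknown characters falling back to the "[GO]" token configured above.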
| 345 | '''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Any):
"""simple docstring"""
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_)
a : str = {}
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Tuple , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : int):
"""simple docstring"""
a : Dict = super().add_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
if num_added_tokens == 0:
raise ValueError(
f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
' `placeholder_token` that is not already in the tokenizer.')
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , *UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str]=1 , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
a : Any = []
if num_vec_per_token == 1:
self.try_adding_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
output.append(UpperCAmelCase_)
else:
a : int = []
for i in range(UpperCAmelCase_):
a : Union[str, Any] = placeholder_token + f"""_{i}"""
self.try_adding_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
output.append(UpperCAmelCase_)
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f"""The tokenizer already has placeholder token {token} that can get confused with"""
f""" {placeholder_token}keep placeholder tokens independent""")
a : Any = output
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : str=1.0):
"""simple docstring"""
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
a : Any = []
for i in range(len(UpperCAmelCase_)):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=UpperCAmelCase_))
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
a : List[Any] = self.token_map[placeholder_token]
a : int = tokens[: 1 + int(len(UpperCAmelCase_) * prop_tokens_to_load)]
if vector_shuffle:
a : List[Any] = copy.copy(UpperCAmelCase_)
random.shuffle(UpperCAmelCase_)
a : List[str] = text.replace(UpperCAmelCase_ , ' '.join(UpperCAmelCase_))
return text
def __call__( self : Optional[int] , UpperCAmelCase_ : Any , *UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Optional[int]=1.0 , **UpperCAmelCase_ : str):
"""simple docstring"""
return super().__call__(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase_ , vector_shuffle=UpperCAmelCase_ , prop_tokens_to_load=UpperCAmelCase_) , *UpperCAmelCase_ , **UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , *UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=1.0 , **UpperCAmelCase_ : Dict):
"""simple docstring"""
return super().encode(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase_ , vector_shuffle=UpperCAmelCase_ , prop_tokens_to_load=UpperCAmelCase_) , *UpperCAmelCase_ , **UpperCAmelCase_ , )
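# Illustrative sketch (added; the token name is hypothetical): if "<cat-toy>" were registered
# with num_vec_per_token=3, the token map would hold ["<cat-toy>_0", "<cat-toy>_1", "<cat-toy>_2"]
# and the replacement step above would rewrite the prompt "a photo of <cat-toy>" into
# "a photo of <cat-toy>_0 <cat-toy>_1 <cat-toy>_2" before the usual CLIP tokenization runs.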
| 345 | 1 |
'''simple docstring'''
import math
def SCREAMING_SNAKE_CASE__ ( snake_case : list , snake_case : int ) -> int:
"""simple docstring"""
a : Optional[int] = len(snake_case )
a : Optional[int] = int(math.floor(math.sqrt(snake_case ) ) )
a : Tuple = 0
while arr[min(snake_case , snake_case ) - 1] < x:
a : Optional[int] = step
step += int(math.floor(math.sqrt(snake_case ) ) )
if prev >= n:
return -1
while arr[prev] < x:
a : Any = prev + 1
if prev == min(snake_case , snake_case ):
return -1
if arr[prev] == x:
return prev
return -1
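# Illustrative note (added, not part of the original implementation): jump search probes a
# sorted array in blocks of roughly sqrt(n) elements, then scans linearly inside the block
# that may contain x, giving O(sqrt(n)) comparisons overall.
# For example, searching for 30 in [0, 1, 2, 3, 4, 10, 20, 30, 40, 50] returns index 7.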
if __name__ == "__main__":
UpperCamelCase : int = input("""Enter numbers separated by a comma:\n""").strip()
UpperCamelCase : Tuple = [int(item) for item in user_input.split(""",""")]
UpperCamelCase : Dict = int(input("""Enter the number to be searched:\n"""))
UpperCamelCase : Any = jump_search(arr, x)
if res == -1:
print("""Number not found!""")
else:
print(f'''Number {x} is at index {res}''')
| 345 | '''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> Optional[Any]:
"""simple docstring"""
a : Union[str, Any] = SwinConfig()
a : Optional[int] = swin_name.split('_' )
a : Union[str, Any] = name_split[1]
a : Dict = int(name_split[4] )
a : Union[str, Any] = int(name_split[3][-1] )
if model_size == "tiny":
a : Optional[Any] = 96
a : Any = (2, 2, 6, 2)
a : List[str] = (3, 6, 12, 24)
elif model_size == "small":
a : int = 96
a : List[str] = (2, 2, 18, 2)
a : int = (3, 6, 12, 24)
elif model_size == "base":
a : Tuple = 128
a : Optional[int] = (2, 2, 18, 2)
a : List[Any] = (4, 8, 16, 32)
else:
a : Dict = 192
a : str = (2, 2, 18, 2)
a : List[Any] = (6, 12, 24, 48)
if "in22k" in swin_name:
a : Any = 21_841
else:
a : str = 1_000
a : str = 'huggingface/label-files'
a : Optional[Any] = 'imagenet-1k-id2label.json'
a : Dict = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='dataset' ) , 'r' ) )
a : Tuple = {int(snake_case ): v for k, v in idalabel.items()}
a : int = idalabel
a : str = {v: k for k, v in idalabel.items()}
a : Dict = img_size
a : List[Any] = num_classes
a : str = embed_dim
a : Dict = depths
a : Union[str, Any] = num_heads
a : int = window_size
return config
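# Hedged example of the name parsing above (added for illustration): the default checkpoint
# name "swin_tiny_patch4_window7_224" splits on "_" into
# ["swin", "tiny", "patch4", "window7", "224"], so model_size is "tiny", img_size is 224
# (name_split[4]) and window_size is 7 (the last character of name_split[3]).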
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
if "patch_embed.proj" in name:
a : int = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
a : Tuple = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
a : Optional[int] = 'encoder.' + name
if "attn.proj" in name:
a : List[Any] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
a : Tuple = name.replace('attn' , 'attention.self' )
if "norm1" in name:
a : Optional[int] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
a : Dict = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
a : Union[str, Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
a : Any = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
a : Union[str, Any] = 'layernorm.weight'
if name == "norm.bias":
a : List[str] = 'layernorm.bias'
if "head" in name:
a : Union[str, Any] = name.replace('head' , 'classifier' )
else:
a : List[Any] = 'swin.' + name
return name
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple ) -> List[str]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
a : Any = orig_state_dict.pop(snake_case )
if "mask" in key:
continue
elif "qkv" in key:
a : Optional[Any] = key.split('.' )
a : Dict = int(key_split[1] )
a : Optional[int] = int(key_split[3] )
a : Tuple = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
a : Optional[Any] = val[:dim, :]
a : List[Any] = val[
dim : dim * 2, :
]
a : List[Any] = val[-dim:, :]
else:
a : Dict = val[
:dim
]
a : Union[str, Any] = val[
dim : dim * 2
]
a : Union[str, Any] = val[
-dim:
]
else:
a : Dict = val
return orig_state_dict
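# Illustrative note (added): timm stores query, key and value as a single fused "qkv"
# parameter whose first dimension is 3 * all_head_size; the split above assigns the first
# third to the query, the middle third to the key and the last third to the value of the
# corresponding Hugging Face attention module, for both weights and biases.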
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : Dict ) -> List[str]:
"""simple docstring"""
a : Any = timm.create_model(snake_case , pretrained=snake_case )
timm_model.eval()
a : str = get_swin_config(snake_case )
a : Optional[int] = SwinForImageClassification(snake_case )
model.eval()
a : Union[str, Any] = convert_state_dict(timm_model.state_dict() , snake_case )
model.load_state_dict(snake_case )
a : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
a : Optional[Any] = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
a : str = Image.open(requests.get(snake_case , stream=snake_case ).raw )
a : Union[str, Any] = image_processor(images=snake_case , return_tensors='pt' )
a : int = timm_model(inputs['pixel_values'] )
a : Optional[int] = model(**snake_case ).logits
assert torch.allclose(snake_case , snake_case , atol=1E-3 )
print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case )
if __name__ == "__main__":
UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
UpperCamelCase : Optional[Any] = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
| 345 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCamelCase : Optional[int] = """platform"""
import jax
import jax.numpy as jnp
from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
shift_tokens_right,
)
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : Optional[Any] , snake_case : Tuple=None , snake_case : List[str]=None , snake_case : int=None , snake_case : List[Any]=None , snake_case : List[Any]=None , snake_case : Union[str, Any]=None , ) -> Optional[Any]:
"""simple docstring"""
if attention_mask is None:
a : str = np.where(input_ids != config.pad_token_id , 1 , 0 )
if decoder_attention_mask is None:
a : Dict = np.where(decoder_input_ids != config.pad_token_id , 1 , 0 )
if head_mask is None:
a : List[str] = np.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
a : List[Any] = np.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
a : Any = np.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
}
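# Hedged example (added; values taken from the toy inputs used further below): with
# pad_token_id=1, a row such as [64, 27, 31, 51, 12, 75, 2] yields the attention mask
# [1, 1, 1, 1, 1, 1, 1], while the padded row [55, 13, 16, 58, 5, 2, 1] yields
# [1, 1, 1, 1, 1, 1, 0], since np.where marks every non-pad position with 1.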
class UpperCamelCase :
"""simple docstring"""
def __init__( self : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str]=1_3 , UpperCAmelCase_ : Any=7 , UpperCAmelCase_ : Dict=True , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Optional[int]=9_9 , UpperCAmelCase_ : int=1_6 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : str=4 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : str=0.1 , UpperCAmelCase_ : List[Any]=3_2 , UpperCAmelCase_ : List[Any]=2 , UpperCAmelCase_ : Dict=1 , UpperCAmelCase_ : List[str]=0 , UpperCAmelCase_ : List[Any]=0.02 , ):
"""simple docstring"""
a : Optional[Any] = parent
a : List[Any] = batch_size
a : Optional[int] = seq_length
a : Optional[int] = is_training
a : Optional[Any] = use_labels
a : Tuple = vocab_size
a : List[Any] = hidden_size
a : List[Any] = num_hidden_layers
a : Union[str, Any] = num_attention_heads
a : Tuple = intermediate_size
a : Any = hidden_act
a : Optional[Any] = hidden_dropout_prob
a : Union[str, Any] = attention_probs_dropout_prob
a : Tuple = max_position_embeddings
a : List[str] = eos_token_id
a : Any = pad_token_id
a : Tuple = bos_token_id
a : int = initializer_range
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Optional[Any] = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size) , 3 , self.vocab_size)
a : Union[str, Any] = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa)) , -1)
a : str = shift_tokens_right(UpperCAmelCase_ , 1 , 2)
a : Tuple = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=UpperCAmelCase_ , )
a : Optional[int] = prepare_blenderbot_inputs_dict(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
return config, inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a , a : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
a : Optional[Any] = 2_0
a : List[str] = model_class_name(UpperCAmelCase_)
a : List[Any] = model.encode(inputs_dict['input_ids'])
a , a : Tuple = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
a : List[str] = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase_ , UpperCAmelCase_)
a : Optional[int] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4')
a : Dict = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
a : List[str] = model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , )
a : int = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4')
a : Optional[int] = model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCAmelCase_ , )
a : Union[str, Any] = model.decode(UpperCAmelCase_ , UpperCAmelCase_)
a : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""")
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple):
"""simple docstring"""
a : Union[str, Any] = 2_0
a : List[str] = model_class_name(UpperCAmelCase_)
a : Dict = model.encode(inputs_dict['input_ids'])
a , a : List[Any] = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
a : Union[str, Any] = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
] , axis=-1 , )
a : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , UpperCAmelCase_ , UpperCAmelCase_)
a : int = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
a : List[Any] = model.decode(
decoder_input_ids[:, :-1] , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , )
a : Optional[Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4')
a : List[Any] = model.decode(
decoder_input_ids[:, -1:] , UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCAmelCase_ , decoder_position_ids=UpperCAmelCase_ , )
a : int = model.decode(UpperCAmelCase_ , UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_)
a : Optional[int] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5])))
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""")
@require_flax
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
A : Optional[Any] = 99
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : Tuple = np.array(
[
[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2],
[6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2],
[5, 9_7, 1_7, 3_9, 9_4, 4_0, 2],
[7_6, 8_3, 9_4, 2_5, 7_0, 7_8, 2],
[8_7, 5_9, 4_1, 3_5, 4_8, 6_6, 2],
[5_5, 1_3, 1_6, 5_8, 5, 2, 1], # note padding
[6_4, 2_7, 3_1, 5_1, 1_2, 7_5, 2],
[5_2, 6_4, 8_6, 1_7, 8_3, 3_9, 2],
[4_8, 6_1, 9, 2_4, 7_1, 8_2, 2],
[2_6, 1, 6_0, 4_8, 2_2, 1_3, 2],
[2_1, 5, 6_2, 2_8, 1_4, 7_6, 2],
[4_5, 9_8, 3_7, 8_6, 5_9, 4_8, 2],
[7_0, 7_0, 5_0, 9, 2_8, 0, 2],
] , dtype=np.intaa , )
a : Union[str, Any] = input_ids.shape[0]
a : Dict = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=2_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=3_2 , decoder_ffn_dim=3_2 , max_position_embeddings=4_8 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a , a , a : List[str] = self._get_config_and_data()
a : str = FlaxBlenderbotSmallForConditionalGeneration(UpperCAmelCase_)
a : Optional[Any] = lm_model(input_ids=UpperCAmelCase_)
a : Optional[int] = (batch_size, input_ids.shape[1], config.vocab_size)
self.assertEqual(outputs['logits'].shape , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : str = BlenderbotSmallConfig(
vocab_size=self.vocab_size , d_model=1_4 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=4_8 , )
a : int = FlaxBlenderbotSmallForConditionalGeneration(UpperCAmelCase_)
a : Tuple = np.array([[7_1, 8_2, 1_8, 3_3, 4_6, 9_1, 2], [6_8, 3_4, 2_6, 5_8, 3_0, 2, 1]] , dtype=np.intaa)
a : List[Any] = np.array([[8_2, 7_1, 8_2, 1_8, 2], [5_8, 6_8, 2, 1, 1]] , dtype=np.intaa)
a : str = lm_model(input_ids=UpperCAmelCase_ , decoder_input_ids=UpperCAmelCase_)
a : Any = (*summary.shape, config.vocab_size)
self.assertEqual(outputs['logits'].shape , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : Any = np.array([[7_1, 8_2, 1_8, 3_3, 2, 1, 1], [6_8, 3_4, 2_6, 5_8, 3_0, 8_2, 2]] , dtype=np.intaa)
a : str = shift_tokens_right(UpperCAmelCase_ , 1 , 2)
a : Optional[int] = np.equal(UpperCAmelCase_ , 1).astype(np.floataa).sum()
a : Dict = np.equal(UpperCAmelCase_ , 1).astype(np.floataa).sum()
self.assertEqual(shifted.shape , input_ids.shape)
self.assertEqual(UpperCAmelCase_ , n_pad_before - 1)
self.assertTrue(np.equal(shifted[:, 0] , 2).all())
@require_flax
class UpperCamelCase ( a_ , unittest.TestCase , a_ ):
"""simple docstring"""
A : List[str] = True
A : List[Any] = (
(
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallForConditionalGeneration,
)
if is_flax_available()
else ()
)
A : Union[str, Any] = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Optional[Any] = FlaxBlenderbotSmallModelTester(self)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a , a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a , a : List[str] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a , a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
a : Tuple = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_)
a : str = model_class(UpperCAmelCase_)
@jax.jit
def encode_jitted(UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str=None , **UpperCAmelCase_ : int):
return model.encode(input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_)
with self.subTest('JIT Enabled'):
a : str = encode_jitted(**UpperCAmelCase_).to_tuple()
with self.subTest('JIT Disabled'):
with jax.disable_jit():
a : Tuple = encode_jitted(**UpperCAmelCase_).to_tuple()
self.assertEqual(len(UpperCAmelCase_) , len(UpperCAmelCase_))
for jitted_output, output in zip(UpperCAmelCase_ , UpperCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a , a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__):
a : Dict = model_class(UpperCAmelCase_)
a : Any = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'])
a : Union[str, Any] = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple):
return model.decode(
decoder_input_ids=UpperCAmelCase_ , decoder_attention_mask=UpperCAmelCase_ , encoder_outputs=UpperCAmelCase_ , )
with self.subTest('JIT Enabled'):
a : Optional[Any] = decode_jitted(**UpperCAmelCase_).to_tuple()
with self.subTest('JIT Disabled'):
with jax.disable_jit():
a : Optional[Any] = decode_jitted(**UpperCAmelCase_).to_tuple()
self.assertEqual(len(UpperCAmelCase_) , len(UpperCAmelCase_))
for jitted_output, output in zip(UpperCAmelCase_ , UpperCAmelCase_):
self.assertEqual(jitted_output.shape , output.shape)
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
for model_class_name in self.all_model_classes:
a : Dict = model_class_name.from_pretrained('facebook/blenderbot_small-90M')
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
a : List[str] = np.ones((1, 1)) * model.config.eos_token_id
a : Optional[int] = model(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
| 345 | '''simple docstring'''
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class UpperCamelCase :
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase_ : Tuple):
"""simple docstring"""
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
a : Dict = deepcopy(UpperCAmelCase_)
elif os.path.exists(UpperCAmelCase_):
with io.open(UpperCAmelCase_ , 'r' , encoding='utf-8') as f:
a : Union[str, Any] = json.load(UpperCAmelCase_)
else:
try:
a : Union[str, Any] = baseaa.urlsafe_baadecode(UpperCAmelCase_).decode('utf-8')
a : List[str] = json.loads(UpperCAmelCase_)
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""")
a : Optional[int] = config
self.set_stage_and_offload()
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : str = self.get_value('zero_optimization.stage' , -1)
# offload
a : Any = False
if self.is_zeroa() or self.is_zeroa():
a : Tuple = set(['cpu', 'nvme'])
a : int = set(
[
self.get_value('zero_optimization.offload_optimizer.device'),
self.get_value('zero_optimization.offload_param.device'),
])
if len(offload_devices & offload_devices_valid) > 0:
a : List[str] = True
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : List[str] = self.config
# find the config node of interest if it exists
a : int = ds_key_long.split('.')
a : Union[str, Any] = nodes.pop()
for node in nodes:
a : Union[str, Any] = config.get(UpperCAmelCase_)
if config is None:
return None, ds_key
return config, ds_key
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int=None):
"""simple docstring"""
a , a : int = self.find_config_node(UpperCAmelCase_)
if config is None:
return default
return config.get(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any=False):
"""simple docstring"""
a : Any = self.config
# find the config node of interest if it exists
a : Optional[Any] = ds_key_long.split('.')
for node in nodes:
a : List[str] = config
a : int = config.get(UpperCAmelCase_)
if config is None:
if must_exist:
raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""")
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : str):
"""simple docstring"""
a : List[str] = self.get_value(UpperCAmelCase_)
return False if value is None else bool(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : List[Any] = self.get_value(UpperCAmelCase_)
return False if value is None else not bool(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return self._stage == 2
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return self._stage == 3
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
return self._offload
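# Illustrative sketch (added; the config dict is hypothetical): given
# {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}, the dotted lookup
# above resolves "zero_optimization.stage" to 3, so the wrapper reports ZeRO stage 3, and
# because "cpu" is one of the valid offload devices the offload flag is set to True.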
class UpperCamelCase :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : int):
"""simple docstring"""
a : Union[str, Any] = engine
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
self.engine.backward(UpperCAmelCase_ , **UpperCAmelCase_)
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any):
"""simple docstring"""
super().__init__(UpperCAmelCase_ , device_placement=UpperCAmelCase_ , scaler=UpperCAmelCase_)
a : List[str] = hasattr(self.optimizer , 'overflow')
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Dict=None):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
if self.__has_overflow__:
return self.optimizer.overflow
return False
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
super().__init__(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int]=0.0_01 , UpperCAmelCase_ : List[Any]=0 , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : int = params
a : str = lr
a : Tuple = weight_decay
a : Dict = kwargs
class UpperCamelCase :
"""simple docstring"""
def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Union[str, Any]=0 , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : str = optimizer
a : Tuple = total_num_steps
a : Optional[Any] = warmup_num_steps
a : List[str] = kwargs
| 345 | 1 |
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class UpperCamelCase ( nn.Module ):
"""simple docstring"""
A : int
A : int
A : float = 0.0
A : int = 1
A : int = 1
A : bool = True
A : bool = False
A : bool = False
A : bool = False
A : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[str] = []
a : Optional[Any] = []
for i in range(self.num_layers):
a : int = self.in_channels if i == 0 else self.out_channels
a : Any = FlaxResnetBlockaD(
in_channels=UpperCAmelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase_)
a : Optional[Any] = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCAmelCase_)
a : Optional[int] = resnets
a : List[Any] = attentions
if self.add_downsample:
a : Dict = FlaxDownsampleaD(self.out_channels , dtype=self.dtype)
def __call__( self : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int=True):
"""simple docstring"""
a : List[str] = ()
for resnet, attn in zip(self.resnets , self.attentions):
a : str = resnet(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_)
a : int = attn(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_)
output_states += (hidden_states,)
if self.add_downsample:
a : Any = self.downsamplers_a(UpperCAmelCase_)
output_states += (hidden_states,)
return hidden_states, output_states
class UpperCamelCase ( nn.Module ):
"""simple docstring"""
A : int
A : int
A : float = 0.0
A : int = 1
A : bool = True
A : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Tuple = []
for i in range(self.num_layers):
a : Dict = self.in_channels if i == 0 else self.out_channels
a : Tuple = FlaxResnetBlockaD(
in_channels=UpperCAmelCase_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase_)
a : Any = resnets
if self.add_downsample:
a : Optional[int] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype)
def __call__( self : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str=True):
"""simple docstring"""
a : Dict = ()
for resnet in self.resnets:
a : Any = resnet(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_)
output_states += (hidden_states,)
if self.add_downsample:
a : Tuple = self.downsamplers_a(UpperCAmelCase_)
output_states += (hidden_states,)
return hidden_states, output_states
class UpperCamelCase ( nn.Module ):
"""simple docstring"""
A : int
A : int
A : int
A : float = 0.0
A : int = 1
A : int = 1
A : bool = True
A : bool = False
A : bool = False
A : bool = False
A : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = []
a : str = []
for i in range(self.num_layers):
a : List[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
a : List[Any] = self.prev_output_channel if i == 0 else self.out_channels
a : List[Any] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase_)
a : Optional[int] = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCAmelCase_)
a : int = resnets
a : Dict = attentions
if self.add_upsample:
a : Optional[Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype)
def __call__( self : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any]=True):
"""simple docstring"""
for resnet, attn in zip(self.resnets , self.attentions):
# pop res hidden states
a : Union[str, Any] = res_hidden_states_tuple[-1]
a : Tuple = res_hidden_states_tuple[:-1]
a : Tuple = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1)
a : Tuple = resnet(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_)
a : str = attn(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_)
if self.add_upsample:
a : List[str] = self.upsamplers_a(UpperCAmelCase_)
return hidden_states
class UpperCamelCase ( nn.Module ):
"""simple docstring"""
A : int
A : int
A : int
A : float = 0.0
A : int = 1
A : bool = True
A : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Tuple = []
for i in range(self.num_layers):
a : Any = self.in_channels if (i == self.num_layers - 1) else self.out_channels
a : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels
a : Optional[int] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase_)
a : int = resnets
if self.add_upsample:
a : Tuple = FlaxUpsampleaD(self.out_channels , dtype=self.dtype)
def __call__( self : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int=True):
"""simple docstring"""
for resnet in self.resnets:
# pop res hidden states
a : Optional[Any] = res_hidden_states_tuple[-1]
a : Dict = res_hidden_states_tuple[:-1]
a : str = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1)
a : int = resnet(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_)
if self.add_upsample:
a : List[str] = self.upsamplers_a(UpperCAmelCase_)
return hidden_states
class UpperCamelCase ( nn.Module ):
"""simple docstring"""
A : int
A : float = 0.0
A : int = 1
A : int = 1
A : bool = False
A : bool = False
A : jnp.dtype = jnp.floataa
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : str = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
a : Any = []
for _ in range(self.num_layers):
a : int = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(UpperCAmelCase_)
a : Tuple = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(UpperCAmelCase_)
a : Tuple = resnets
a : Tuple = attentions
def __call__( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict=True):
"""simple docstring"""
a : Dict = self.resnets[0](UpperCAmelCase_ , UpperCAmelCase_)
for attn, resnet in zip(self.attentions , self.resnets[1:]):
a : Dict = attn(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_)
a : int = resnet(UpperCAmelCase_ , UpperCAmelCase_ , deterministic=UpperCAmelCase_)
return hidden_states
| 345 | '''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
UpperCamelCase : List[str] = logging.get_logger(__name__)
@dataclass
class UpperCamelCase :
"""simple docstring"""
A : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
A : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
A : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A : bool = field(
default=a_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Union[str, Any] = self.task_name.lower()
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : int = "train"
A : Tuple = "dev"
A : List[Any] = "test"
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : GlueDataTrainingArguments
A : str
A : List[InputFeatures]
def __init__( self : Tuple , UpperCAmelCase_ : GlueDataTrainingArguments , UpperCAmelCase_ : PreTrainedTokenizerBase , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Union[str, Split] = Split.train , UpperCAmelCase_ : Optional[str] = None , ):
"""simple docstring"""
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , UpperCAmelCase_ , )
a : Dict = args
a : int = glue_processors[args.task_name]()
a : int = glue_output_modes[args.task_name]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
try:
a : str = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name')
# Load data features from cache or dataset file
a : List[str] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
a : Tuple = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
a , a : str = label_list[2], label_list[1]
a : int = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
a : Union[str, Any] = cached_features_file + '.lock'
with FileLock(UpperCAmelCase_):
if os.path.exists(UpperCAmelCase_) and not args.overwrite_cache:
a : Optional[Any] = time.time()
a : Optional[Any] = torch.load(UpperCAmelCase_)
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start)
else:
logger.info(f"""Creating features from dataset file at {args.data_dir}""")
if mode == Split.dev:
a : List[Any] = self.processor.get_dev_examples(args.data_dir)
elif mode == Split.test:
a : Optional[Any] = self.processor.get_test_examples(args.data_dir)
else:
a : List[str] = self.processor.get_train_examples(args.data_dir)
if limit_length is not None:
a : Dict = examples[:limit_length]
a : List[Any] = glue_convert_examples_to_features(
UpperCAmelCase_ , UpperCAmelCase_ , max_length=args.max_seq_length , label_list=UpperCAmelCase_ , output_mode=self.output_mode , )
a : Dict = time.time()
torch.save(self.features , UpperCAmelCase_)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""")
def __len__( self : Tuple):
"""simple docstring"""
return len(self.features)
def __getitem__( self : Optional[int] , UpperCAmelCase_ : List[str]):
"""simple docstring"""
return self.features[i]
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return self.label_list
| 345 | 1 |
'''simple docstring'''
from collections import namedtuple
UpperCamelCase : Dict = namedtuple("""from_to""", """from_ to""")
UpperCamelCase : Union[str, Any] = {
"""cubicmeter""": from_to(1, 1),
"""litre""": from_to(0.0_01, 1_000),
"""kilolitre""": from_to(1, 1),
"""gallon""": from_to(0.0_04_54, 2_64.1_72),
"""cubicyard""": from_to(0.7_64_55, 1.3_07_95),
"""cubicfoot""": from_to(0.0_28, 35.31_47),
"""cup""": from_to(0.0_00_23_65_88, 42_26.75),
}
def SCREAMING_SNAKE_CASE__ ( snake_case : float , snake_case : str , snake_case : str ) -> float:
"""simple docstring"""
if from_type not in METRIC_CONVERSION:
raise ValueError(
F"""Invalid 'from_type' value: {from_type!r} Supported values are:\n"""
+ ', '.join(snake_case ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
F"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
+ ', '.join(snake_case ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
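# Hedged worked example (added): converting 4 "cubicmeter" to "litre" multiplies by
# METRIC_CONVERSION["cubicmeter"].from_ == 1 and METRIC_CONVERSION["litre"].to == 1000,
# i.e. 4 * 1 * 1000 = 4000 litres.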
if __name__ == "__main__":
import doctest
doctest.testmod()
| 345 | '''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase : Dict = logging.get_logger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Any = ["pixel_values"]
def __init__( self : str , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : str = size if size is not None else {'shortest_edge': 2_5_6}
a : Dict = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
a : int = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Any = do_resize
a : List[str] = size
a : Union[str, Any] = resample
a : int = do_center_crop
a : Optional[int] = crop_size
a : Tuple = do_rescale
a : int = rescale_factor
a : Optional[Any] = do_normalize
a : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
a : Optional[int] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
a : Union[str, Any] = get_resize_output_image_size(UpperCAmelCase_ , size=size['shortest_edge'] , default_to_square=UpperCAmelCase_)
return resize(UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
a : List[str] = get_size_dict(UpperCAmelCase_)
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
return center_crop(UpperCAmelCase_ , size=(size['height'], size['width']) , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : float , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
a : int = do_resize if do_resize is not None else self.do_resize
a : int = size if size is not None else self.size
a : Union[str, Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = resample if resample is not None else self.resample
a : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
a : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
a : Dict = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : str = do_rescale if do_rescale is not None else self.do_rescale
a : int = rescale_factor if rescale_factor is not None else self.rescale_factor
a : str = do_normalize if do_normalize is not None else self.do_normalize
a : List[str] = image_mean if image_mean is not None else self.image_mean
a : Optional[int] = image_std if image_std is not None else self.image_std
a : Dict = make_list_of_images(UpperCAmelCase_)
if not valid_images(UpperCAmelCase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a : List[Any] = [to_numpy_array(UpperCAmelCase_) for image in images]
if do_resize:
a : Dict = [self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_) for image in images]
if do_center_crop:
a : Any = [self.center_crop(image=UpperCAmelCase_ , size=UpperCAmelCase_) for image in images]
if do_rescale:
a : Optional[int] = [self.rescale(image=UpperCAmelCase_ , scale=UpperCAmelCase_) for image in images]
if do_normalize:
a : Dict = [self.normalize(image=UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_) for image in images]
a : List[Any] = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : List[str] = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Tuple] = None):
"""simple docstring"""
a : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase_) != len(UpperCAmelCase_):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(UpperCAmelCase_):
a : Optional[Any] = target_sizes.numpy()
a : List[str] = []
for idx in range(len(UpperCAmelCase_)):
a : Optional[Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=UpperCAmelCase_)
a : Union[str, Any] = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(UpperCAmelCase_)
else:
a : Optional[int] = logits.argmax(dim=1)
a : List[str] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
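# Illustrative note (added): the post-processing above upsamples each logits map to the
# requested (height, width) with bilinear interpolation and takes the argmax over the class
# dimension, producing one integer segmentation map per input image; without target sizes the
# argmax is taken directly on the low-resolution logits.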
| 345 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
torch.manual_seed(0)
a : Dict = UNetaDModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
@property
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
torch.manual_seed(0)
a : List[Any] = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=3 , )
return model
@property
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
torch.manual_seed(0)
a : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : int = self.dummy_uncond_unet
a : Tuple = DDIMScheduler()
a : Dict = self.dummy_vq_model
a : str = LDMPipeline(unet=UpperCAmelCase_ , vqvae=UpperCAmelCase_ , scheduler=UpperCAmelCase_)
ldm.to(UpperCAmelCase_)
ldm.set_progress_bar_config(disable=UpperCAmelCase_)
a : str = torch.manual_seed(0)
a : Dict = ldm(generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='numpy').images
a : Optional[Any] = torch.manual_seed(0)
a : Union[str, Any] = ldm(generator=UpperCAmelCase_ , num_inference_steps=2 , output_type='numpy' , return_dict=UpperCAmelCase_)[0]
a : str = image[0, -3:, -3:, -1]
a : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
a : Dict = np.array([0.85_12, 0.8_18, 0.64_11, 0.68_08, 0.44_65, 0.56_18, 0.46, 0.62_31, 0.51_72])
a : Union[str, Any] = 1e-2 if torch_device != 'mps' else 3e-2
assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : str = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256')
ldm.to(UpperCAmelCase_)
ldm.set_progress_bar_config(disable=UpperCAmelCase_)
a : Optional[Any] = torch.manual_seed(0)
a : Union[str, Any] = ldm(generator=UpperCAmelCase_ , num_inference_steps=5 , output_type='numpy').images
a : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
a : Dict = np.array([0.43_99, 0.4_49_75, 0.4_68_25, 0.4_74, 0.43_59, 0.45_81, 0.4_50_95, 0.43_41, 0.44_47])
a : Any = 1e-2 if torch_device != 'mps' else 3e-2
assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
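# Minimal sampling sketch, assuming the public CompVis/ldm-celebahq-256 checkpoint used by
# the slow test above; variable names and the 50-step setting are placeholders.
#
# >>> from diffusers import LDMPipeline
# >>> pipe = LDMPipeline.from_pretrained('CompVis/ldm-celebahq-256')
# >>> image = pipe(num_inference_steps=50, output_type='pil').images[0]
# >>> image.save('ldm_sample.png')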
| 345 | '''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( nth_term : int | float | str , power : int | float | str ) -> list[str]:
    """Return the first nth_term terms of the P-Series 1 + 1/2^p + 1/3^p + ... as strings."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term )
    power = int(power )
    series : list[str] = []
    for temp in range(int(nth_term ) ):
        series.append(F"""1 / {pow(temp + 1 , int(power ) )}""" if series else '1' )
    return series
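# Worked example (illustrative): with nth_term=5 and power=2 the function above returns
# ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25'], the first five terms of 1 + 1/2^2 + ... + 1/5^2.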
if __name__ == "__main__":
import doctest
doctest.testmod()
    nth_term = int(input("""Enter the last number (nth term) of the P-Series"""))
    power = int(input("""Enter the power for P-Series"""))
    print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
    print(SCREAMING_SNAKE_CASE__(nth_term, power))
| 345 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : List[str] = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
A : List[Any] = "CIDAS/clipseg-rd64-refined"
A : Optional[Any] = "image_segmenter"
A : List[Any] = CLIPSegForImageSegmentation
A : Tuple = ["image", "text"]
A : Optional[int] = ["image"]
def __init__( self : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(self , ['vision'])
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : "Image" , UpperCAmelCase_ : str):
"""simple docstring"""
return self.pre_processor(text=[label] , images=[image] , padding=UpperCAmelCase_ , return_tensors='pt')
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : str):
"""simple docstring"""
with torch.no_grad():
a : Union[str, Any] = self.model(**UpperCAmelCase_).logits
return logits
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : int):
"""simple docstring"""
a : int = outputs.cpu().detach().numpy()
a : str = 0
a : str = 1
return Image.fromarray((array * 2_5_5).astype(np.uinta))
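# Illustrative usage sketch, assuming the upstream tool name ImageSegmentationTool and a
# placeholder image path and label.
#
# >>> from PIL import Image
# >>> tool = ImageSegmentationTool()
# >>> image = Image.open('cats.png')
# >>> mask = tool(image, 'cat')
# >>> mask.save('cat_mask.png')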
| 345 | '''simple docstring'''
import torch
def SCREAMING_SNAKE_CASE__ ( ) -> None:
    """Print how many CUDA GPUs are visible to this process."""
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(F"""Successfully ran on {num_gpus} GPUs""" )
if __name__ == "__main__":
    SCREAMING_SNAKE_CASE__()
| 345 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : int = logging.get_logger(__name__)
UpperCamelCase : str = {
"""SCUT-DLVCLab/lilt-roberta-en-base""": (
"""https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"""
),
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Any = "lilt"
def __init__( self : List[Any] , UpperCAmelCase_ : Union[str, Any]=3_0_5_2_2 , UpperCAmelCase_ : str=7_6_8 , UpperCAmelCase_ : List[Any]=1_2 , UpperCAmelCase_ : Tuple=1_2 , UpperCAmelCase_ : Dict=3_0_7_2 , UpperCAmelCase_ : List[str]="gelu" , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : Dict=5_1_2 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : str=1e-12 , UpperCAmelCase_ : Union[str, Any]=0 , UpperCAmelCase_ : Optional[Any]="absolute" , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : int=1_0_2_4 , **UpperCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
a : int = vocab_size
a : List[Any] = hidden_size
a : List[str] = num_hidden_layers
a : List[str] = num_attention_heads
a : Tuple = hidden_act
a : List[Any] = intermediate_size
a : Tuple = hidden_dropout_prob
a : Optional[Any] = attention_probs_dropout_prob
a : int = max_position_embeddings
a : int = type_vocab_size
a : int = initializer_range
a : Union[str, Any] = layer_norm_eps
a : Union[str, Any] = position_embedding_type
a : Optional[int] = classifier_dropout
a : int = channel_shrink_ratio
a : Dict = max_ad_position_embeddings
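# Illustrative usage sketch, assuming the upstream transformers.LiltConfig class; the
# default of 4 is read off the channel-shrink-ratio argument in the signature above.
#
# >>> from transformers import LiltConfig
# >>> config = LiltConfig()
# >>> config.channel_shrink_ratio
# 4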
| 345 | '''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
UpperCamelCase : Optional[int] = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
UpperCamelCase : Optional[Any] = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
UpperCamelCase : str = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Optional[int]=None , UpperCAmelCase_ : Tuple="auto" , UpperCAmelCase_ : Any=-1 , UpperCAmelCase_ : Optional[int]=0.9 , UpperCAmelCase_ : Union[str, Any]=5 , UpperCAmelCase_ : int=5_0_0 , UpperCAmelCase_ : int="gpt2-large" , UpperCAmelCase_ : Tuple=-1 , UpperCAmelCase_ : Dict=1_0_2_4 , UpperCAmelCase_ : List[str]=2_5 , UpperCAmelCase_ : int=5 , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : str=2_5 , ):
"""simple docstring"""
a : List[str] = compute_mauve(
p_text=UpperCAmelCase_ , q_text=UpperCAmelCase_ , p_features=UpperCAmelCase_ , q_features=UpperCAmelCase_ , p_tokens=UpperCAmelCase_ , q_tokens=UpperCAmelCase_ , num_buckets=UpperCAmelCase_ , pca_max_data=UpperCAmelCase_ , kmeans_explained_var=UpperCAmelCase_ , kmeans_num_redo=UpperCAmelCase_ , kmeans_max_iter=UpperCAmelCase_ , featurize_model_name=UpperCAmelCase_ , device_id=UpperCAmelCase_ , max_text_length=UpperCAmelCase_ , divergence_curve_discretization_size=UpperCAmelCase_ , mauve_scaling_factor=UpperCAmelCase_ , verbose=UpperCAmelCase_ , seed=UpperCAmelCase_ , )
return out
| 345 | 1 |
'''simple docstring'''
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str]=1_3 , UpperCAmelCase_ : Union[str, Any]=3_0 , UpperCAmelCase_ : List[Any]=2 , UpperCAmelCase_ : str=3 , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : List[str]=3_2 , UpperCAmelCase_ : List[Any]=5 , UpperCAmelCase_ : List[str]=4 , UpperCAmelCase_ : int=3_7 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : Tuple=1_0 , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : Dict=3 , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : Dict=2 , ):
"""simple docstring"""
a : Optional[int] = parent
a : Any = batch_size
a : str = image_size
a : List[Any] = patch_size
a : Dict = num_channels
a : List[str] = is_training
a : Any = use_labels
a : Union[str, Any] = hidden_size
a : List[str] = num_hidden_layers
a : List[str] = num_attention_heads
a : Optional[Any] = intermediate_size
a : str = hidden_act
a : str = hidden_dropout_prob
a : Optional[int] = attention_probs_dropout_prob
a : Optional[Any] = type_sequence_label_size
a : Optional[Any] = initializer_range
a : List[Any] = scope
a : List[str] = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
a : str = (image_size // patch_size) ** 2
a : Dict = num_patches + 2
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a : List[Any] = None
if self.use_labels:
a : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : Tuple = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
a : Optional[Any] = DeiTModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Tuple = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str]):
"""simple docstring"""
a : int = DeiTForMaskedImageModeling(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[int] = model(UpperCAmelCase_)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
a : List[str] = 1
a : Tuple = DeiTForMaskedImageModeling(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : int = model(UpperCAmelCase_)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str]):
"""simple docstring"""
a : List[str] = self.type_sequence_label_size
a : Optional[int] = DeiTForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
a : List[Any] = 1
a : Union[str, Any] = DeiTForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : int = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : Tuple = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
A : Optional[int] = (
{
"feature-extraction": DeiTModel,
"image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
A : Union[str, Any] = False
A : List[str] = False
A : Union[str, Any] = False
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Tuple = DeiTModelTester(self)
a : int = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=3_7)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='DeiT does not use inputs_embeds')
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a , a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Any = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
a : Union[str, Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a , a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : int = model_class(UpperCAmelCase_)
a : List[str] = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : Tuple = [*signature.parameters.keys()]
a : List[str] = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple=False):
"""simple docstring"""
a : int = super()._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
if not self.model_tester.is_training:
return
a , a : str = self.model_tester.prepare_config_and_inputs_for_common()
a : List[str] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(UpperCAmelCase_)
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
a : Union[str, Any] = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.train()
a : Optional[Any] = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
a : Optional[int] = model(**UpperCAmelCase_).loss
loss.backward()
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a , a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
a : Dict = False
a : Union[str, Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(UpperCAmelCase_) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
a : Any = model_class(UpperCAmelCase_)
model.gradient_checkpointing_enable()
model.to(UpperCAmelCase_)
model.train()
a : Any = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
a : List[str] = model(**UpperCAmelCase_).loss
loss.backward()
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a , a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
a : Union[str, Any] = [
{'title': 'multi_label_classification', 'num_labels': 2, 'dtype': torch.float},
{'title': 'single_label_classification', 'num_labels': 1, 'dtype': torch.long},
{'title': 'regression', 'num_labels': 1, 'dtype': torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(UpperCAmelCase_),
*get_values(UpperCAmelCase_),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f"""Testing {model_class} with {problem_type['title']}"""):
a : List[Any] = problem_type['title']
a : Any = problem_type['num_labels']
a : Optional[Any] = model_class(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.train()
a : Tuple = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
if problem_type["num_labels"] > 1:
a : Optional[int] = inputs['labels'].unsqueeze(1).repeat(1 , problem_type['num_labels'])
a : Any = inputs['labels'].to(problem_type['dtype'])
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=UpperCAmelCase_) as warning_list:
a : int = model(**UpperCAmelCase_).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message):
raise ValueError(
f"""Something is going wrong in the regression problem: intercepted {w.message}""")
loss.backward()
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Dict = DeiTModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE__ ( ) -> Dict:
"""simple docstring"""
a : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained('facebook/deit-base-distilled-patch16-224')
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Optional[Any] = DeiTForImageClassificationWithTeacher.from_pretrained('facebook/deit-base-distilled-patch16-224').to(
UpperCAmelCase_)
a : List[Any] = self.default_image_processor
a : Dict = prepare_img()
a : Union[str, Any] = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
a : int = model(**UpperCAmelCase_)
# verify the logits
a : Optional[int] = torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
a : Dict = torch.tensor([-1.02_66, 0.19_12, -1.28_61]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : Optional[int] = DeiTModel.from_pretrained(
'facebook/deit-base-distilled-patch16-224' , torch_dtype=torch.floataa , device_map='auto')
a : List[Any] = self.default_image_processor
a : Any = prepare_img()
a : Union[str, Any] = image_processor(images=UpperCAmelCase_ , return_tensors='pt')
a : Union[str, Any] = inputs.pixel_values.to(UpperCAmelCase_)
# forward pass to make sure inference works in fp16
with torch.no_grad():
a : Tuple = model(UpperCAmelCase_)
| 345 | '''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( nums : list[int | float] , left : int , right : int ) -> int | float:
    """Divide-and-conquer maximum of nums[left:right + 1]."""
    if len(nums ) == 0:
        raise ValueError('find_max() arg is an empty sequence' )
    if (
        left >= len(nums )
        or left < -len(nums )
        or right >= len(nums )
        or right < -len(nums )
    ):
        raise IndexError('list index out of range' )
    if left == right:
        return nums[left]
    mid = (left + right) >> 1 # the middle
    left_max = SCREAMING_SNAKE_CASE__(nums , left , mid ) # find max in range[left, mid]
    right_max = SCREAMING_SNAKE_CASE__(nums , mid + 1 , right ) # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
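# Worked example (illustrative), using the divide-and-conquer helper above:
# >>> SCREAMING_SNAKE_CASE__([1, 3, 5, 9, 2, 7], 0, 5)
# 9
# The recursion splits at mid=2, takes max 5 from [1, 3, 5] and max 9 from [9, 2, 7].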
| 345 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]=1_3 , UpperCAmelCase_ : List[str]=3_0 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Union[str, Any]=3_2 , UpperCAmelCase_ : Union[str, Any]=5 , UpperCAmelCase_ : Tuple=4 , UpperCAmelCase_ : Dict=3_7 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Dict=1_0 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Tuple=2 , ):
"""simple docstring"""
a : Any = parent
a : Optional[int] = batch_size
a : str = image_size
a : str = patch_size
a : List[Any] = num_channels
a : Optional[int] = is_training
a : Dict = use_labels
a : Any = hidden_size
a : Optional[int] = num_hidden_layers
a : int = num_attention_heads
a : int = intermediate_size
a : Any = hidden_act
a : Optional[int] = hidden_dropout_prob
a : Optional[int] = attention_probs_dropout_prob
a : Dict = type_sequence_label_size
a : Tuple = initializer_range
a : List[str] = scope
a : str = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
a : Optional[Any] = (image_size // patch_size) ** 2
a : str = num_patches + 1
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a : List[Any] = None
if self.use_labels:
a : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : List[str] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : int = ViTModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : str = ViTForMaskedImageModeling(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
a : int = 1
a : Union[str, Any] = ViTForMaskedImageModeling(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : Optional[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any):
"""simple docstring"""
a : str = self.type_sequence_label_size
a : Tuple = ViTForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
a : List[Any] = 1
a : Union[str, Any] = ViTForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : Optional[int] = model(UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : str = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
A : Optional[Any] = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
A : List[str] = True
A : Optional[int] = False
A : Dict = False
A : Optional[int] = False
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : str = ViTModelTester(self)
a : Optional[Any] = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=3_7)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds')
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a , a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Union[str, Any] = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
a : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a , a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Union[str, Any] = model_class(UpperCAmelCase_)
a : Tuple = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : Dict = [*signature.parameters.keys()]
a : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Dict = ViTModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
"""simple docstring"""
a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Optional[Any] = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(UpperCAmelCase_)
a : List[Any] = self.default_image_processor
a : List[str] = prepare_img()
a : Tuple = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
a : List[Any] = model(**UpperCAmelCase_)
# verify the logits
a : List[str] = torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
a : Union[str, Any] = torch.tensor([-0.27_44, 0.82_15, -0.08_36]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : List[str] = ViTModel.from_pretrained('facebook/dino-vits8').to(UpperCAmelCase_)
a : Any = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=4_8_0)
a : int = prepare_img()
a : List[str] = image_processor(images=UpperCAmelCase_ , return_tensors='pt')
a : List[str] = inputs.pixel_values.to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
a : List[Any] = model(UpperCAmelCase_ , interpolate_pos_encoding=UpperCAmelCase_)
# verify the logits
a : Dict = torch.Size((1, 3_6_0_1, 3_8_4))
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase_)
a : str = torch.tensor(
[[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : str = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto')
a : List[Any] = self.default_image_processor
a : List[str] = prepare_img()
a : Tuple = image_processor(images=UpperCAmelCase_ , return_tensors='pt')
a : Tuple = inputs.pixel_values.to(UpperCAmelCase_)
# forward pass to make sure inference works in fp16
with torch.no_grad():
a : Tuple = model(UpperCAmelCase_)
| 345 | '''simple docstring'''
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCamelCase : int = """true"""
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : int=82 , snake_case : Tuple=16 ) -> Union[str, Any]:
"""simple docstring"""
set_seed(42 )
a : List[str] = RegressionModel()
a : Union[str, Any] = deepcopy(snake_case )
a : Dict = RegressionDataset(length=snake_case )
a : Dict = DataLoader(snake_case , batch_size=snake_case )
model.to(accelerator.device )
a , a : Optional[int] = accelerator.prepare(snake_case , snake_case )
return model, ddp_model, dataloader
def SCREAMING_SNAKE_CASE__ ( snake_case : Accelerator , snake_case : Union[str, Any]=False ) -> Optional[int]:
"""simple docstring"""
a : List[str] = AutoTokenizer.from_pretrained('hf-internal-testing/mrpc-bert-base-cased' )
a : Any = load_dataset('glue' , 'mrpc' , split='validation' )
def tokenize_function(snake_case : int ):
a : Any = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=snake_case , max_length=snake_case )
return outputs
with accelerator.main_process_first():
a : Dict = dataset.map(
snake_case , batched=snake_case , remove_columns=['idx', 'sentence1', 'sentence2'] , )
a : List[str] = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(snake_case : Optional[Any] ):
if use_longest:
return tokenizer.pad(snake_case , padding='longest' , return_tensors='pt' )
return tokenizer.pad(snake_case , padding='max_length' , max_length=128 , return_tensors='pt' )
return DataLoader(snake_case , shuffle=snake_case , collate_fn=snake_case , batch_size=16 )
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
a : int = Accelerator(dispatch_batches=snake_case , split_batches=snake_case )
a : List[str] = get_dataloader(snake_case , not dispatch_batches )
a : Optional[Any] = AutoModelForSequenceClassification.from_pretrained(
'hf-internal-testing/mrpc-bert-base-cased' , return_dict=snake_case )
a , a : Optional[Any] = accelerator.prepare(snake_case , snake_case )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
a : Dict = []
for batch in dataloader:
a , a : Any = batch.values()
with torch.no_grad():
a : Tuple = model(snake_case )
a , a : Dict = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
a , a : List[str] = [], []
for logit, targ in logits_and_targets:
logits.append(snake_case )
targs.append(snake_case )
a , a : Any = torch.cat(snake_case ), torch.cat(snake_case )
return logits, targs
def SCREAMING_SNAKE_CASE__ ( snake_case : Accelerator , snake_case : Dict=82 , snake_case : str=False , snake_case : List[str]=False , snake_case : List[Any]=16 ) -> Optional[int]:
"""simple docstring"""
a , a , a : int = get_basic_setup(snake_case , snake_case , snake_case )
a , a : int = generate_predictions(snake_case , snake_case , snake_case )
assert (
len(snake_case ) == num_samples
), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(snake_case )}"""
def SCREAMING_SNAKE_CASE__ ( snake_case : bool = False , snake_case : bool = False ) -> List[str]:
"""simple docstring"""
a : int = evaluate.load('glue' , 'mrpc' )
a , a : Tuple = get_mrpc_setup(snake_case , snake_case )
# First do baseline
a , a , a : Tuple = setup['no']
model.to(snake_case )
model.eval()
for batch in dataloader:
batch.to(snake_case )
with torch.inference_mode():
a : List[Any] = model(**snake_case )
a : Optional[Any] = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=snake_case , references=batch['labels'] )
a : Tuple = metric.compute()
# Then do distributed
a , a , a : Tuple = setup['ddp']
model.eval()
for batch in dataloader:
with torch.inference_mode():
a : List[str] = model(**snake_case )
a : Optional[Any] = outputs.logits.argmax(dim=-1 )
a : Optional[int] = batch['labels']
a , a : Optional[int] = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=snake_case , references=snake_case )
a : str = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def SCREAMING_SNAKE_CASE__ ( ) -> str:
"""simple docstring"""
a : Dict = Accelerator(split_batches=snake_case , dispatch_batches=snake_case )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print('**Testing gather_for_metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(snake_case , snake_case )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test torch metrics**' )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
a : List[Any] = Accelerator(split_batches=snake_case , dispatch_batches=snake_case )
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(snake_case , 99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print('**Test last batch is not dropped when perfectly divisible**' )
a : Optional[Any] = Accelerator()
test_torch_metrics(snake_case , 512 )
accelerator.state._reset_state()
def _mp_fn( index : Union[str, Any] ) -> None:
    """Entry point used by xla_spawn (TPUs); simply runs the test routine above."""
    # For xla_spawn (TPUs)
    SCREAMING_SNAKE_CASE__()
if __name__ == "__main__":
    SCREAMING_SNAKE_CASE__()
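# Usage note: this script is intended to be run through the Accelerate launcher so the
# gather_for_metrics checks above run on every process, e.g.
#   accelerate launch --num_processes 2 test_metrics.py
# (the file name and process count are placeholders).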
| 345 | 1 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
UpperCamelCase : Dict = get_tests_dir("""fixtures""")
UpperCamelCase : Optional[Any] = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
UpperCamelCase : Any = get_tests_dir("""fixtures/dummy-config.json""")
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : List[Any] = 0
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : Optional[int] = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h')
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Dict = AutoFeatureExtractor.from_pretrained(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
a : Dict = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
a : List[Any] = AutoFeatureExtractor.from_pretrained(UpperCAmelCase_).to_dict()
config_dict.pop('feature_extractor_type')
a : Dict = WavaVecaFeatureExtractor(**UpperCAmelCase_)
# save in new folder
model_config.save_pretrained(UpperCAmelCase_)
config.save_pretrained(UpperCAmelCase_)
a : Union[str, Any] = AutoFeatureExtractor.from_pretrained(UpperCAmelCase_)
# make sure private variable is not incorrectly saved
a : Tuple = json.loads(config.to_json_string())
self.assertTrue('_processor_class' not in dict_as_saved)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : List[Any] = AutoFeatureExtractor.from_pretrained(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
with self.assertRaisesRegex(
UpperCAmelCase_ , 'bert-base is not a local folder and is not a valid model identifier'):
a : Dict = AutoFeatureExtractor.from_pretrained('bert-base')
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
with self.assertRaisesRegex(
UpperCAmelCase_ , r'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'):
a : Tuple = AutoFeatureExtractor.from_pretrained(UpperCAmelCase_ , revision='aaaaaa')
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
with self.assertRaisesRegex(
UpperCAmelCase_ , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
a : List[str] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model')
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
with self.assertRaises(UpperCAmelCase_):
a : Tuple = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor')
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCAmelCase_):
a : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=UpperCAmelCase_)
a : Dict = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=UpperCAmelCase_)
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor')
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(UpperCAmelCase_)
a : int = AutoFeatureExtractor.from_pretrained(UpperCAmelCase_ , trust_remote_code=UpperCAmelCase_)
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor')
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
try:
AutoConfig.register('custom' , UpperCAmelCase_)
AutoFeatureExtractor.register(UpperCAmelCase_ , UpperCAmelCase_)
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase_):
AutoFeatureExtractor.register(UpperCAmelCase_ , UpperCAmelCase_)
# Now that the config is registered, it can be used as any other config with the auto-API
a : Tuple = CustomFeatureExtractor.from_pretrained(UpperCAmelCase_)
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(UpperCAmelCase_)
a : Optional[Any] = AutoFeatureExtractor.from_pretrained(UpperCAmelCase_)
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Optional[Any] = True
try:
AutoConfig.register('custom' , UpperCAmelCase_)
AutoFeatureExtractor.register(UpperCAmelCase_ , UpperCAmelCase_)
# If remote code is not set, the default is to use local
a : Dict = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor')
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor')
self.assertTrue(feature_extractor.is_local)
# If remote code is disabled, we load the local one.
a : Tuple = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=UpperCAmelCase_)
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor')
self.assertTrue(feature_extractor.is_local)
# If remote is enabled, we load from the Hub
a : str = AutoFeatureExtractor.from_pretrained(
'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=UpperCAmelCase_)
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor')
self.assertTrue(not hasattr(UpperCAmelCase_ , 'is_local'))
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 345 | '''simple docstring'''
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNetaDConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Optional[int] = ["vqvae"]
def __init__( self : List[str] , UpperCAmelCase_ : AutoencoderKL , UpperCAmelCase_ : UNetaDConditionModel , UpperCAmelCase_ : Mel , UpperCAmelCase_ : Union[DDIMScheduler, DDPMScheduler] , ):
"""simple docstring"""
super().__init__()
self.register_modules(unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , mel=UpperCAmelCase_ , vqvae=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
return 5_0 if isinstance(self.scheduler , UpperCAmelCase_) else 1_0_0_0
@torch.no_grad()
def __call__( self : Dict , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : str = None , UpperCAmelCase_ : np.ndarray = None , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : int = None , UpperCAmelCase_ : torch.Generator = None , UpperCAmelCase_ : float = 0 , UpperCAmelCase_ : float = 0 , UpperCAmelCase_ : torch.Generator = None , UpperCAmelCase_ : float = 0 , UpperCAmelCase_ : torch.Tensor = None , UpperCAmelCase_ : torch.Tensor = None , UpperCAmelCase_ : Optional[Any]=True , ):
"""simple docstring"""
a : Optional[Any] = steps or self.get_default_steps()
self.scheduler.set_timesteps(UpperCAmelCase_)
a : Optional[Any] = step_generator or generator
# For backwards compatibility
if type(self.unet.config.sample_size) == int:
a : Dict = (self.unet.config.sample_size, self.unet.config.sample_size)
if noise is None:
a : Dict = randn_tensor(
(
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size[0],
self.unet.config.sample_size[1],
) , generator=UpperCAmelCase_ , device=self.device , )
a : Tuple = noise
a : Optional[int] = None
if audio_file is not None or raw_audio is not None:
self.mel.load_audio(UpperCAmelCase_ , UpperCAmelCase_)
a : List[Any] = self.mel.audio_slice_to_image(UpperCAmelCase_)
a : str = np.frombuffer(input_image.tobytes() , dtype='uint8').reshape(
(input_image.height, input_image.width))
a : List[str] = (input_image / 2_5_5) * 2 - 1
a : Any = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float).to(self.device)
if self.vqvae is not None:
a : List[Any] = self.vqvae.encode(torch.unsqueeze(UpperCAmelCase_ , 0)).latent_dist.sample(
generator=UpperCAmelCase_)[0]
a : str = self.vqvae.config.scaling_factor * input_images
if start_step > 0:
a : Union[str, Any] = self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , self.scheduler.timesteps[start_step - 1])
a : Dict = (
self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
)
a : List[Any] = int(mask_start_secs * pixels_per_second)
a : Optional[Any] = int(mask_end_secs * pixels_per_second)
a : Optional[int] = self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , torch.tensor(self.scheduler.timesteps[start_step:]))
for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
if isinstance(self.unet , UpperCAmelCase_):
a : Dict = self.unet(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)['sample']
else:
a : str = self.unet(UpperCAmelCase_ , UpperCAmelCase_)['sample']
if isinstance(self.scheduler , UpperCAmelCase_):
a : List[Any] = self.scheduler.step(
model_output=UpperCAmelCase_ , timestep=UpperCAmelCase_ , sample=UpperCAmelCase_ , eta=UpperCAmelCase_ , generator=UpperCAmelCase_ , )['prev_sample']
else:
a : Any = self.scheduler.step(
model_output=UpperCAmelCase_ , timestep=UpperCAmelCase_ , sample=UpperCAmelCase_ , generator=UpperCAmelCase_ , )['prev_sample']
if mask is not None:
if mask_start > 0:
a : str = mask[:, step, :, :mask_start]
if mask_end > 0:
a : Dict = mask[:, step, :, -mask_end:]
if self.vqvae is not None:
# 0.18215 was scaling factor used in training to ensure unit variance
a : List[str] = 1 / self.vqvae.config.scaling_factor * images
a : str = self.vqvae.decode(UpperCAmelCase_)['sample']
a : Tuple = (images / 2 + 0.5).clamp(0 , 1)
a : Any = images.cpu().permute(0 , 2 , 3 , 1).numpy()
a : List[str] = (images * 2_5_5).round().astype('uint8')
a : Tuple = list(
(Image.fromarray(_[:, :, 0]) for _ in images)
if images.shape[3] == 1
else (Image.fromarray(UpperCAmelCase_ , mode='RGB').convert('L') for _ in images))
a : List[str] = [self.mel.image_to_audio(UpperCAmelCase_) for _ in images]
if not return_dict:
return images, (self.mel.get_sample_rate(), audios)
return BaseOutput(**AudioPipelineOutput(np.array(UpperCAmelCase_)[:, np.newaxis, :]) , **ImagePipelineOutput(UpperCAmelCase_))
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : List[Image.Image] , UpperCAmelCase_ : int = 5_0):
"""simple docstring"""
assert isinstance(self.scheduler , UpperCAmelCase_)
self.scheduler.set_timesteps(UpperCAmelCase_)
a : Dict = np.array(
[np.frombuffer(image.tobytes() , dtype='uint8').reshape((1, image.height, image.width)) for image in images])
a : Tuple = (sample / 2_5_5) * 2 - 1
a : int = torch.Tensor(UpperCAmelCase_).to(self.device)
for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,))):
a : Optional[Any] = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
a : Optional[Any] = self.scheduler.alphas_cumprod[t]
a : List[Any] = (
self.scheduler.alphas_cumprod[prev_timestep]
if prev_timestep >= 0
else self.scheduler.final_alpha_cumprod
)
a : List[str] = 1 - alpha_prod_t
a : Optional[Any] = self.unet(UpperCAmelCase_ , UpperCAmelCase_)['sample']
a : Union[str, Any] = (1 - alpha_prod_t_prev) ** 0.5 * model_output
a : Dict = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
a : Union[str, Any] = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
return sample
@staticmethod
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : torch.Tensor , UpperCAmelCase_ : torch.Tensor , UpperCAmelCase_ : float):
"""simple docstring"""
a : List[Any] = acos(torch.dot(torch.flatten(UpperCAmelCase_) , torch.flatten(UpperCAmelCase_)) / torch.norm(UpperCAmelCase_) / torch.norm(UpperCAmelCase_))
return sin((1 - alpha) * theta) * xa / sin(UpperCAmelCase_) + sin(alpha * theta) * xa / sin(UpperCAmelCase_)
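# Standalone sketch of the spherical interpolation ("slerp") computed by the static
# method above. Illustrative only: the argument names are local to this sketch, and
# `torch`, `acos` and `sin` are assumed to be the module-level imports that method uses.
def _slerp_sketch(xa: torch.Tensor, xb: torch.Tensor, alpha: float) -> torch.Tensor:
    # angle between the two flattened tensors, then a sine-weighted blend of the endpoints
    theta = acos(torch.dot(torch.flatten(xa), torch.flatten(xb)) / torch.norm(xa) / torch.norm(xb))
    return sin((1 - alpha) * theta) * xa / sin(theta) + sin(alpha * theta) * xb / sin(theta)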
| 345 | 1 |
'''simple docstring'''
from typing import Any
class UpperCamelCase :
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase_ : Any):
"""simple docstring"""
a : List[str] = data
a : Optional[int] = None
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Tuple):
"""simple docstring"""
a : Tuple = None
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : Tuple = self.head
while temp is not None:
print(temp.data , end=' ')
a : List[Any] = temp.next
print()
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Any):
"""simple docstring"""
a : str = Node(UpperCAmelCase_)
a : Tuple = self.head
a : Optional[int] = new_node
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
if node_data_a == node_data_a:
return
else:
a : Optional[Any] = self.head
while node_a is not None and node_a.data != node_data_a:
a : str = node_a.next
a : Dict = self.head
while node_a is not None and node_a.data != node_data_a:
a : Any = node_a.next
if node_a is None or node_a is None:
return
a , a : str = node_a.data, node_a.data
if __name__ == "__main__":
UpperCamelCase : Union[str, Any] = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print("""After swapping""")
ll.print_list()
| 345 | '''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCamelCase :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]=1_3 , UpperCAmelCase_ : List[str]=3_0 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Union[str, Any]=3 , UpperCAmelCase_ : str=True , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Union[str, Any]=3_2 , UpperCAmelCase_ : Union[str, Any]=5 , UpperCAmelCase_ : Tuple=4 , UpperCAmelCase_ : Dict=3_7 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Dict=1_0 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : Tuple=2 , ):
"""simple docstring"""
a : Any = parent
a : Optional[int] = batch_size
a : str = image_size
a : str = patch_size
a : List[Any] = num_channels
a : Optional[int] = is_training
a : Dict = use_labels
a : Any = hidden_size
a : Optional[int] = num_hidden_layers
a : int = num_attention_heads
a : int = intermediate_size
a : Any = hidden_act
a : Optional[int] = hidden_dropout_prob
a : Optional[int] = attention_probs_dropout_prob
a : Dict = type_sequence_label_size
a : Tuple = initializer_range
a : List[str] = scope
a : str = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
a : Optional[Any] = (image_size // patch_size) ** 2
a : str = num_patches + 1
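        # e.g. image_size=30 and patch_size=2 give (30 // 2) ** 2 = 225 patches, so seq_length = 226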
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
a : List[Any] = None
if self.use_labels:
a : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : List[str] = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCAmelCase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : int = ViTModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : str = ViTForMaskedImageModeling(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size))
# test greyscale images
a : int = 1
a : Union[str, Any] = ViTForMaskedImageModeling(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : Optional[Any] = model(UpperCAmelCase_)
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any):
"""simple docstring"""
a : str = self.type_sequence_label_size
a : Tuple = ViTForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
# test greyscale images
a : List[Any] = 1
a : Union[str, Any] = ViTForImageClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
a : Optional[int] = model(UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.prepare_config_and_inputs()
        a , a , a : Tuple = config_and_inputs
a : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : str = (
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
A : Optional[Any] = (
{"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
if is_torch_available()
else {}
)
A : List[str] = True
A : Optional[int] = False
A : Dict = False
A : Optional[int] = False
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : str = ViTModelTester(self)
a : Optional[Any] = ConfigTester(self , config_class=UpperCAmelCase_ , has_text_modality=UpperCAmelCase_ , hidden_size=3_7)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='ViT does not use inputs_embeds')
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a , a : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Union[str, Any] = model_class(UpperCAmelCase_)
self.assertIsInstance(model.get_input_embeddings() , (nn.Module))
a : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCAmelCase_ , nn.Linear))
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a , a : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a : Union[str, Any] = model_class(UpperCAmelCase_)
a : Tuple = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a : Dict = [*signature.parameters.keys()]
a : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Dict = ViTModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[Any]:
"""simple docstring"""
a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224') if is_vision_available() else None
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Optional[Any] = ViTForImageClassification.from_pretrained('google/vit-base-patch16-224').to(UpperCAmelCase_)
a : List[Any] = self.default_image_processor
a : List[str] = prepare_img()
a : Tuple = image_processor(images=UpperCAmelCase_ , return_tensors='pt').to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
a : List[Any] = model(**UpperCAmelCase_)
# verify the logits
a : List[str] = torch.Size((1, 1_0_0_0))
self.assertEqual(outputs.logits.shape , UpperCAmelCase_)
a : Union[str, Any] = torch.tensor([-0.27_44, 0.82_15, -0.08_36]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCAmelCase_ , atol=1e-4))
@slow
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : List[str] = ViTModel.from_pretrained('facebook/dino-vits8').to(UpperCAmelCase_)
a : Any = ViTImageProcessor.from_pretrained('facebook/dino-vits8' , size=4_8_0)
a : int = prepare_img()
a : List[str] = image_processor(images=UpperCAmelCase_ , return_tensors='pt')
a : List[str] = inputs.pixel_values.to(UpperCAmelCase_)
# forward pass
with torch.no_grad():
a : List[Any] = model(UpperCAmelCase_ , interpolate_pos_encoding=UpperCAmelCase_)
# verify the logits
a : Dict = torch.Size((1, 3_6_0_1, 3_8_4))
self.assertEqual(outputs.last_hidden_state.shape , UpperCAmelCase_)
a : str = torch.tensor(
[[4.23_40, 4.39_06, -6.66_92], [4.54_63, 1.89_28, -6.72_57], [4.44_29, 0.84_96, -5.85_85]]).to(UpperCAmelCase_)
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCAmelCase_ , atol=1e-4))
@slow
@require_accelerate
@require_torch_gpu
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : str = ViTModel.from_pretrained('facebook/dino-vits8' , torch_dtype=torch.floataa , device_map='auto')
a : List[Any] = self.default_image_processor
a : List[str] = prepare_img()
a : Tuple = image_processor(images=UpperCAmelCase_ , return_tensors='pt')
a : Tuple = inputs.pixel_values.to(UpperCAmelCase_)
# forward pass to make sure inference works in fp16
with torch.no_grad():
a : Tuple = model(UpperCAmelCase_)
| 345 | 1 |
'''simple docstring'''
import math
class UpperCamelCase :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : Union[str, Any]=0): # a graph with Node 0,1,...,N-1
"""simple docstring"""
a : Any = n
a : Optional[Any] = [
[math.inf for j in range(0 , UpperCAmelCase_)] for i in range(0 , UpperCAmelCase_)
] # adjacency matrix for weight
a : Any = [
[math.inf for j in range(0 , UpperCAmelCase_)] for i in range(0 , UpperCAmelCase_)
] # dp[i][j] stores minimum distance from i to j
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : Any , UpperCAmelCase_ : int):
"""simple docstring"""
a : Optional[int] = w
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
for k in range(0 , self.n):
for i in range(0 , self.n):
for j in range(0 , self.n):
a : Dict = min(self.dp[i][j] , self.dp[i][k] + self.dp[k][j])
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any):
"""simple docstring"""
return self.dp[u][v]
if __name__ == "__main__":
UpperCamelCase : Union[str, Any] = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 10)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 10)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
graph.show_min(1, 4)
graph.show_min(0, 3)
| 345 | '''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : List[str] = (
"This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image."
"It takes two arguments named `image` which should be the original image, and `label` which should be a text "
"describing the elements what should be identified in the segmentation mask. The tool returns the mask."
)
A : List[Any] = "CIDAS/clipseg-rd64-refined"
A : Optional[Any] = "image_segmenter"
A : List[Any] = CLIPSegForImageSegmentation
A : Tuple = ["image", "text"]
A : Optional[int] = ["image"]
def __init__( self : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : str):
"""simple docstring"""
requires_backends(self , ['vision'])
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : "Image" , UpperCAmelCase_ : str):
"""simple docstring"""
return self.pre_processor(text=[label] , images=[image] , padding=UpperCAmelCase_ , return_tensors='pt')
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : str):
"""simple docstring"""
with torch.no_grad():
a : Union[str, Any] = self.model(**UpperCAmelCase_).logits
return logits
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : int):
"""simple docstring"""
a : int = outputs.cpu().detach().numpy()
a : str = 0
a : str = 1
return Image.fromarray((array * 2_5_5).astype(np.uinta))
| 345 | 1 |
'''simple docstring'''
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
UpperCamelCase : Any = version.parse(importlib_metadata.version("""nltk"""))
if NLTK_VERSION >= version.Version("""3.6.4"""):
from nltk import word_tokenize
UpperCamelCase : int = """\
@inproceedings{banarjee2005,
title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},
author = {Banerjee, Satanjeev and Lavie, Alon},
booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},
month = jun,
year = {2005},
address = {Ann Arbor, Michigan},
publisher = {Association for Computational Linguistics},
url = {https://www.aclweb.org/anthology/W05-0909},
pages = {65--72},
}
"""
UpperCamelCase : Any = """\
METEOR, an automatic metric for machine translation evaluation
that is based on a generalized concept of unigram matching between the
machine-produced translation and human-produced reference translations.
Unigrams can be matched based on their surface forms, stemmed forms,
and meanings; furthermore, METEOR can be easily extended to include more
advanced matching strategies. Once all generalized unigram matches
between the two strings have been found, METEOR computes a score for
this matching using a combination of unigram-precision, unigram-recall, and
a measure of fragmentation that is designed to directly capture how
well-ordered the matched words in the machine translation are in relation
to the reference.
METEOR gets an R correlation value of 0.347 with human evaluation on the Arabic
data and 0.331 on the Chinese data. This is shown to be an improvement on
using simply unigram-precision, unigram-recall and their harmonic F1
combination.
"""
UpperCamelCase : str = """
Computes METEOR score of translated segments against one or more references.
Args:
predictions: list of predictions to score. Each prediction
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
alpha: Parameter for controlling relative weights of precision and recall. default: 0.9
beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3
gamma: Relative weight assigned to fragmentation penalty. default: 0.5
Returns:
'meteor': meteor score.
Examples:
>>> meteor = datasets.load_metric('meteor')
>>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]
>>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]
>>> results = meteor.compute(predictions=predictions, references=references)
>>> print(round(results[\"meteor\"], 4))
0.6944
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence'),
'references': datasets.Value('string' , id='sequence'),
}) , codebase_urls=['https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py'] , reference_urls=[
'https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score',
'https://en.wikipedia.org/wiki/METEOR',
] , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : str):
"""simple docstring"""
import nltk
nltk.download('wordnet')
if NLTK_VERSION >= version.Version('3.6.5'):
nltk.download('punkt')
if NLTK_VERSION >= version.Version('3.6.6'):
nltk.download('omw-1.4')
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : Optional[int]=0.9 , UpperCAmelCase_ : List[str]=3 , UpperCAmelCase_ : Optional[int]=0.5):
"""simple docstring"""
if NLTK_VERSION >= version.Version('3.6.5'):
a : Union[str, Any] = [
meteor_score.single_meteor_score(
word_tokenize(UpperCAmelCase_) , word_tokenize(UpperCAmelCase_) , alpha=UpperCAmelCase_ , beta=UpperCAmelCase_ , gamma=UpperCAmelCase_)
for ref, pred in zip(UpperCAmelCase_ , UpperCAmelCase_)
]
else:
a : Optional[int] = [
meteor_score.single_meteor_score(UpperCAmelCase_ , UpperCAmelCase_ , alpha=UpperCAmelCase_ , beta=UpperCAmelCase_ , gamma=UpperCAmelCase_)
for ref, pred in zip(UpperCAmelCase_ , UpperCAmelCase_)
]
return {"meteor": np.mean(UpperCAmelCase_)}
| 345 | '''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCamelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
A : Union[str, Any] = CTRLTokenizer
A : List[Any] = False
A : Optional[Any] = False
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a : Dict = ['adapt', 're@@', 'a@@', 'apt', 'c@@', 't', '<unk>']
a : Tuple = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
a : Any = ['#version: 0.2', 'a p', 'ap t</w>', 'r e', 'a d', 'ad apt</w>', '']
a : List[Any] = {'unk_token': '<unk>'}
a : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
a : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(UpperCAmelCase_) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , **UpperCAmelCase_ : Dict):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return CTRLTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Any):
"""simple docstring"""
a : List[str] = 'adapt react readapt apt'
a : int = 'adapt react readapt apt'
return input_text, output_text
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : int = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map)
a : str = 'adapt react readapt apt'
a : Optional[Any] = 'adapt re@@ a@@ c@@ t re@@ adapt apt'.split()
a : List[Any] = tokenizer.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
a : Union[str, Any] = tokens + [tokenizer.unk_token]
a : Any = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , UpperCAmelCase_)
| 345 | 1 |
'''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( snake_case : int | float | str , snake_case : int | float | str ) -> list[str]:
"""simple docstring"""
if nth_term == "":
return [""]
a : Dict = int(snake_case )
a : Optional[int] = int(snake_case )
a : list[str] = []
for temp in range(int(snake_case ) ):
series.append(F"""1 / {pow(temp + 1 , int(snake_case ) )}""" if series else '1' )
return series
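# intended behaviour of the routine above (with distinct nth-term and power arguments):
# the first five terms for p = 2 come out as ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']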
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase : Optional[int] = int(input("""Enter the last number (nth term) of the P-Series"""))
UpperCamelCase : List[Any] = int(input("""Enter the power for P-Series"""))
print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
print(p_series(nth_term, power))
| 345 | '''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( num_a : int , num_b : int ) -> bool:
    """simple docstring"""
    return num_a ^ num_b < 0
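# e.g. SCREAMING_SNAKE_CASE__(3, -7) is True and SCREAMING_SNAKE_CASE__(3, 7) is False:
# the XOR of two integers is negative exactly when their sign bits differ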
if __name__ == "__main__":
import doctest
doctest.testmod()
| 345 | 1 |
'''simple docstring'''
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Dict=False ) -> int:
"""simple docstring"""
try:
a : Tuple = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
a : str = default
else:
# KEY is set, convert it to True or False.
try:
a : List[str] = strtobool(snake_case )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
UpperCamelCase : str = parse_flag_from_env("""RUN_SLOW""", default=False)
UpperCamelCase : Optional[int] = parse_flag_from_env("""RUN_REMOTE""", default=False)
UpperCamelCase : Union[str, Any] = parse_flag_from_env("""RUN_LOCAL""", default=True)
UpperCamelCase : List[Any] = parse_flag_from_env("""RUN_PACKAGED""", default=True)
# Compression
UpperCamelCase : Tuple = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""")
UpperCamelCase : Tuple = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""")
UpperCamelCase : Union[str, Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""")
# Audio
UpperCamelCase : List[Any] = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""),
reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """,
)
# Beam
UpperCamelCase : Dict = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""),
reason="""test requires apache-beam and a compatible dill version""",
)
# Dill-cloudpickle compatibility
UpperCamelCase : Optional[Any] = pytest.mark.skipif(
config.DILL_VERSION <= version.parse("""0.3.2"""),
reason="""test requires dill>0.3.2 for cloudpickle compatibility""",
)
# Windows
UpperCamelCase : Dict = pytest.mark.skipif(
sys.platform == """win32""",
reason="""test should not be run on Windows""",
)
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple ) -> Optional[Any]:
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
a : Dict = unittest.skip('test requires faiss' )(snake_case )
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] ) -> List[Any]:
"""simple docstring"""
try:
import regex # noqa
except ImportError:
a : int = unittest.skip('test requires regex' )(snake_case )
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] ) -> Optional[Any]:
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
a : List[Any] = unittest.skip('test requires elasticsearch' )(snake_case )
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] ) -> Dict:
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
a : Dict = unittest.skip('test requires sqlalchemy' )(snake_case )
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
if not config.TORCH_AVAILABLE:
a : str = unittest.skip('test requires PyTorch' )(snake_case )
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] ) -> Tuple:
"""simple docstring"""
if not config.TF_AVAILABLE:
a : Tuple = unittest.skip('test requires TensorFlow' )(snake_case )
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] ) -> List[str]:
"""simple docstring"""
if not config.JAX_AVAILABLE:
a : Optional[Any] = unittest.skip('test requires JAX' )(snake_case )
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : int ) -> Dict:
"""simple docstring"""
if not config.PIL_AVAILABLE:
a : str = unittest.skip('test requires Pillow' )(snake_case )
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple ) -> str:
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip('test requires transformers' )(snake_case )
else:
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : int ) -> List[str]:
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip('test requires tiktoken' )(snake_case )
else:
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple ) -> List[Any]:
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip('test requires spacy' )(snake_case )
else:
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : Dict ) -> Union[str, Any]:
"""simple docstring"""
def _require_spacy_model(snake_case : Optional[int] ):
try:
import spacy # noqa F401
spacy.load(snake_case )
except ImportError:
return unittest.skip('test requires spacy' )(snake_case )
except OSError:
return unittest.skip('test requires spacy model \'{}\''.format(snake_case ) )(snake_case )
else:
return test_case
return _require_spacy_model
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] ) -> Optional[Any]:
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip('test requires pyspark' )(snake_case )
else:
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] ) -> Dict:
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip('test requires joblibspark' )(snake_case )
else:
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> List[str]:
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
a : List[Any] = unittest.skip('test is slow' )(snake_case )
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] ) -> Tuple:
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
a : Dict = unittest.skip('test is local' )(snake_case )
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> int:
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
a : Optional[Any] = unittest.skip('test is packaged' )(snake_case )
return test_case
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] ) -> Dict:
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
a : Union[str, Any] = unittest.skip('test requires remote' )(snake_case )
return test_case
def SCREAMING_SNAKE_CASE__ ( *snake_case : Union[str, Any] ) -> Tuple:
"""simple docstring"""
def decorate(cls : Optional[int] ):
for name, fn in cls.__dict__.items():
if callable(snake_case ) and name.startswith('test' ):
for decorator in decorators:
a : Tuple = decorator(snake_case )
setattr(cls , snake_case , snake_case )
return cls
return decorate
class UpperCamelCase ( a_ ):
"""simple docstring"""
pass
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : List[str] = 0
A : Optional[int] = 1
A : Tuple = 2
@contextmanager
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str]=OfflineSimulationMode.CONNECTION_FAILS , snake_case : List[Any]=1E-1_6 ) -> Dict:
"""simple docstring"""
a : List[Any] = requests.Session().request
def timeout_request(snake_case : Optional[int] , snake_case : Tuple , snake_case : List[Any] , **snake_case : Optional[int] ):
# Change the url to an invalid url so that the connection hangs
a : List[str] = 'https://10.255.255.1'
if kwargs.get('timeout' ) is None:
raise RequestWouldHangIndefinitelyError(
F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
a : Any = timeout
try:
return online_request(snake_case , snake_case , **snake_case )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
a : str = url
a : Union[str, Any] = e.args[0]
a : Dict = (max_retry_error.args[0].replace('10.255.255.1' , F"""OfflineMock[{url}]""" ),)
a : Tuple = (max_retry_error,)
raise
def raise_connection_error(snake_case : Union[str, Any] , snake_case : Optional[Any] , **snake_case : Any ):
raise requests.ConnectionError('Offline mode is enabled.' , request=snake_case )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch('requests.Session.send' , snake_case ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch('requests.Session.request' , snake_case ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch('datasets.config.HF_DATASETS_OFFLINE' , snake_case ):
yield
else:
raise ValueError('Please use a value from the OfflineSimulationMode enum.' )
@contextmanager
def SCREAMING_SNAKE_CASE__ ( *snake_case : Union[str, Any] , **snake_case : int ) -> Any:
"""simple docstring"""
a : str = str(Path().resolve() )
with tempfile.TemporaryDirectory(*snake_case , **snake_case ) as tmp_dir:
try:
os.chdir(snake_case )
yield
finally:
os.chdir(snake_case )
@contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
"""simple docstring"""
import gc
gc.collect()
a : List[Any] = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
"""simple docstring"""
import gc
gc.collect()
a : List[str] = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Tuple ) -> List[str]:
"""simple docstring"""
return deepcopy(snake_case ).integers(0 , 100 , 10 ).tolist() == deepcopy(snake_case ).integers(0 , 100 , 10 ).tolist()
def SCREAMING_SNAKE_CASE__ ( snake_case : Any ) -> Any:
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(snake_case : Tuple , *snake_case : Tuple , **snake_case : str ):
try:
return func(*snake_case , **snake_case )
except HTTPError as err:
if str(snake_case ).startswith('500' ) or str(snake_case ).startswith('502' ):
pytest.xfail(str(snake_case ) )
raise err
return decorator.decorator(_wrapper , snake_case )
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
a : Optional[int] = returncode
a : Optional[Any] = stdout
a : Dict = stderr
async def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : List[Any] ) -> Any:
"""simple docstring"""
while True:
a : Tuple = await stream.readline()
if line:
callback(snake_case )
else:
break
async def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : int=None , snake_case : Union[str, Any]=None , snake_case : Optional[Any]=None , snake_case : Optional[int]=False , snake_case : Optional[int]=False ) -> _RunOutput:
"""simple docstring"""
if echo:
print('\nRunning: ' , ' '.join(snake_case ) )
a : int = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=snake_case , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=snake_case , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
a : int = []
a : List[str] = []
def tee(snake_case : int , snake_case : Union[str, Any] , snake_case : List[str] , snake_case : List[str]="" ):
a : Any = line.decode('utf-8' ).rstrip()
sink.append(snake_case )
if not quiet:
print(snake_case , snake_case , file=snake_case )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda snake_case : tee(snake_case , snake_case , sys.stdout , label='stdout:' ) ),
_read_stream(p.stderr , lambda snake_case : tee(snake_case , snake_case , sys.stderr , label='stderr:' ) ),
] , timeout=snake_case , )
return _RunOutput(await p.wait() , snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : Union[str, Any]=None , snake_case : List[Any]=None , snake_case : Dict=180 , snake_case : List[Any]=False , snake_case : Union[str, Any]=True ) -> _RunOutput:
"""simple docstring"""
a : Union[str, Any] = asyncio.get_event_loop()
a : Tuple = loop.run_until_complete(
_stream_subprocess(snake_case , env=snake_case , stdin=snake_case , timeout=snake_case , quiet=snake_case , echo=snake_case ) )
a : Optional[int] = ' '.join(snake_case )
if result.returncode > 0:
a : List[Any] = '\n'.join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
return result
def SCREAMING_SNAKE_CASE__ ( ) -> Union[str, Any]:
"""simple docstring"""
a : Union[str, Any] = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' )
a : List[str] = re.sub(R'^gw' , '' , snake_case , 0 , re.M )
return int(snake_case )
def SCREAMING_SNAKE_CASE__ ( ) -> Tuple:
"""simple docstring"""
a : List[Any] = 29_500
a : int = pytest_xdist_worker_id()
return port + uniq_delta
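# e.g. pytest-xdist worker "gw3" yields worker id 3, so that worker uses port 29500 + 3,
# keeping parallel test workers from colliding on the same port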
| 345 | '''simple docstring'''
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
UpperCamelCase : int = logging.get_logger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Dict = ["pixel_values"]
def __init__( self : Optional[Any] , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : List[str] = size if size is not None else {'shortest_edge': 2_2_4}
a : str = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
a : int = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Any = do_resize
a : Dict = size
a : Optional[Any] = resample
a : List[Any] = do_center_crop
a : List[Any] = crop_size
a : Optional[Any] = do_rescale
a : Dict = rescale_factor
a : Tuple = do_normalize
a : int = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
a : Optional[Any] = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Optional[Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
a : int = int((2_5_6 / 2_2_4) * size['shortest_edge'])
a : Optional[int] = get_resize_output_image_size(UpperCAmelCase_ , size=UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : Optional[Any] = {'height': output_size[0], 'width': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
f"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""")
return resize(
UpperCAmelCase_ , size=(size_dict['height'], size_dict['width']) , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Union[str, Any] , ):
"""simple docstring"""
a : str = get_size_dict(UpperCAmelCase_)
if "height" not in size or "width" not in size:
raise ValueError(f"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""")
return center_crop(UpperCAmelCase_ , size=(size['height'], size['width']) , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[int, float] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , UpperCAmelCase_ : Optional[Union[float, Iterable[float]]] = None , UpperCAmelCase_ : Optional[TensorType] = None , UpperCAmelCase_ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : int = do_resize if do_resize is not None else self.do_resize
a : Optional[int] = resample if resample is not None else self.resample
a : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop
a : Tuple = do_rescale if do_rescale is not None else self.do_rescale
a : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
a : Dict = do_normalize if do_normalize is not None else self.do_normalize
a : Tuple = image_mean if image_mean is not None else self.image_mean
a : int = image_std if image_std is not None else self.image_std
a : Optional[int] = size if size is not None else self.size
a : Optional[Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : List[Any] = crop_size if crop_size is not None else self.crop_size
a : str = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Dict = make_list_of_images(UpperCAmelCase_)
if not valid_images(UpperCAmelCase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a : Any = [to_numpy_array(UpperCAmelCase_) for image in images]
if do_resize:
a : Optional[int] = [self.resize(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_center_crop:
a : int = [self.center_crop(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_rescale:
a : Any = [self.rescale(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
if do_normalize:
a : str = [self.normalize(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : Optional[int] = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : Optional[int] = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_)
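# Illustrative usage (hypothetical `pil_image`; keyword values mirror the defaults in __init__ above):
# processor = UpperCamelCase(size={'shortest_edge': 2_2_4}, crop_size={'height': 2_2_4, 'width': 2_2_4})
# batch = processor(images=[pil_image], return_tensors='np')
# batch['pixel_values'].shape  # -> (1, 3, 224, 224) after resize, center crop, rescale and normalize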
| 345 | 1 |
'''simple docstring'''
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class UpperCamelCase ( a_ , unittest.TestCase ):
"""simple docstring"""
A : Optional[Any] = LEDTokenizer
A : str = LEDTokenizerFast
A : Optional[Any] = True
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
super().setUp()
a : Tuple = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
a : Union[str, Any] = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
a : str = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
a : Any = {'unk_token': '<unk>'}
a : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'])
a : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'])
with open(self.vocab_file , 'w' , encoding='utf-8') as fp:
fp.write(json.dumps(UpperCAmelCase_) + '\n')
with open(self.merges_file , 'w' , encoding='utf-8') as fp:
fp.write('\n'.join(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : List[Any] , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
kwargs.update(self.special_tokens_map)
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
return LEDTokenizer.from_pretrained('allenai/led-base-16384')
@cached_property
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
return LEDTokenizerFast.from_pretrained('allenai/led-base-16384')
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
a : List[str] = [0, 2_5_0, 2_5_1, 1_7_8_1_8, 1_3, 3_9_1_8_6, 1_9_3_8, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
a : List[str] = tokenizer(UpperCAmelCase_ , max_length=len(UpperCAmelCase_) , padding=UpperCAmelCase_ , return_tensors='pt')
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertEqual((2, 9) , batch.input_ids.shape)
self.assertEqual((2, 9) , batch.attention_mask.shape)
a : int = batch.input_ids.tolist()[0]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : int = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
a : List[Any] = tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_ , return_tensors='pt')
self.assertIn('input_ids' , UpperCAmelCase_)
self.assertIn('attention_mask' , UpperCAmelCase_)
self.assertNotIn('labels' , UpperCAmelCase_)
self.assertNotIn('decoder_attention_mask' , UpperCAmelCase_)
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : str = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
a : Dict = tokenizer(text_target=UpperCAmelCase_ , max_length=3_2 , padding='max_length' , return_tensors='pt')
self.assertEqual(3_2 , targets['input_ids'].shape[1])
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
a : Any = tokenizer(
['I am a small frog' * 1_0_2_4, 'I am a small frog'] , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , return_tensors='pt')
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertEqual(batch.input_ids.shape , (2, 5_1_2_2))
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Union[str, Any] = ['A long paragraph for summarization.']
a : List[Any] = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
a : Optional[Any] = tokenizer(UpperCAmelCase_ , return_tensors='pt')
a : Any = tokenizer(text_target=UpperCAmelCase_ , return_tensors='pt')
a : Dict = inputs['input_ids']
a : Optional[int] = targets['input_ids']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
@require_torch
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
a : Any = ['Summary of the text.', 'Another summary.']
a : Tuple = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
a : Dict = tokenizer(UpperCAmelCase_ , padding=UpperCAmelCase_)
a : int = [[0] * len(UpperCAmelCase_) for x in encoded_output['input_ids']]
a : Optional[Any] = tokenizer.pad(UpperCAmelCase_)
self.assertSequenceEqual(outputs['global_attention_mask'] , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
a : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_)
a : Optional[int] = self.tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_)
a : int = 'A, <mask> AllenNLP sentence.'
a : Any = tokenizer_r.encode_plus(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_)
a : int = tokenizer_p.encode_plus(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_)
self.assertEqual(sum(tokens_r['token_type_ids']) , sum(tokens_p['token_type_ids']))
self.assertEqual(
sum(tokens_r['attention_mask']) / len(tokens_r['attention_mask']) , sum(tokens_p['attention_mask']) / len(tokens_p['attention_mask']) , )
a : str = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'])
a : Any = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'])
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2])
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2])
self.assertSequenceEqual(
UpperCAmelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
self.assertSequenceEqual(
UpperCAmelCase_ , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'])
| 345 | '''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : float | Decimal , snake_case : float = 10**-10 ) -> float:
"""simple docstring"""
a : Dict = a
while True:
a : Any = Decimal(snake_case ) - (
Decimal(eval(snake_case ) ) / Decimal(eval(str(diff(snake_case ) ) ) ) # noqa: S307
)
# This number dictates the accuracy of the answer
if abs(eval(snake_case ) ) < precision: # noqa: S307
return float(snake_case )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}''')
# Find root of polynomial
print(f'''The root of x**2 - 5*x + 2 = 0 is {newton_raphson("x**2 - 5*x + 2", 0.4)}''')
# Find Square Root of 5
print(f'''The root of log(x) - 1 = 0 is {newton_raphson("log(x) - 1", 2)}''')
# Exponential Roots
print(f'''The root of exp(x) - 1 = 0 is {newton_raphson("exp(x) - 1", 0)}''')
| 345 | 1 |
'''simple docstring'''
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
UpperCamelCase : List[Any] = {
# 1536-bit
5: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 2048-bit
14: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AACAA68FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 3072-bit
15: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 4096-bit
16: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"""
+ """FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 6144-bit
17: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"""
+ """8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"""
+ """302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"""
+ """A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"""
+ """49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"""
+ """FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"""
+ """180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"""
+ """3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"""
+ """04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"""
+ """B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"""
+ """1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"""
+ """E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"""
+ """99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"""
+ """04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"""
+ """233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"""
+ """D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"""
+ """AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"""
+ """DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"""
+ """2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"""
+ """F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"""
+ """BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"""
+ """B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"""
+ """387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"""
+ """6DCC4024FFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
# 8192-bit
18: {
"""prime""": int(
"""FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"""
+ """29024E088A67CC74020BBEA63B139B22514A08798E3404DD"""
+ """EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"""
+ """E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"""
+ """EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"""
+ """C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"""
+ """83655D23DCA3AD961C62F356208552BB9ED529077096966D"""
+ """670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"""
+ """E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"""
+ """DE2BCBF6955817183995497CEA956AE515D2261898FA0510"""
+ """15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"""
+ """ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"""
+ """ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"""
+ """F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"""
+ """BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"""
+ """43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"""
+ """88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"""
+ """2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"""
+ """287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"""
+ """1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"""
+ """93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"""
+ """36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"""
+ """F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"""
+ """179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"""
+ """DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"""
+ """5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"""
+ """D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"""
+ """23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"""
+ """CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"""
+ """06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"""
+ """DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"""
+ """12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"""
+ """38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"""
+ """741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"""
+ """3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"""
+ """22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"""
+ """4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"""
+ """062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"""
+ """4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"""
+ """B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"""
+ """4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"""
+ """9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"""
+ """60C980DD98EDD3DFFFFFFFFFFFFFFFFF""",
base=16,
),
"""generator""": 2,
},
}
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase_ : int = 1_4):
"""simple docstring"""
if group not in primes:
raise ValueError('Unsupported Group')
a : List[Any] = primes[group]['prime']
a : List[Any] = primes[group]['generator']
a : Tuple = int(hexlify(urandom(3_2)) , base=1_6)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
return hex(self.__private_key)[2:]
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : List[Any] = pow(self.generator , self.__private_key , self.prime)
return hex(UpperCAmelCase_)[2:]
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : int):
"""simple docstring"""
return (
2 <= key <= self.prime - 2
and pow(UpperCAmelCase_ , (self.prime - 1) // 2 , self.prime) == 1
)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : str):
"""simple docstring"""
a : Dict = int(UpperCAmelCase_ , base=1_6)
if not self.is_valid_public_key(UpperCAmelCase_):
raise ValueError('Invalid public key')
a : List[str] = pow(UpperCAmelCase_ , self.__private_key , self.prime)
return shaaaa(str(UpperCAmelCase_).encode()).hexdigest()
@staticmethod
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : int , UpperCAmelCase_ : int):
"""simple docstring"""
return (
2 <= remote_public_key_str <= prime - 2
and pow(UpperCAmelCase_ , (prime - 1) // 2 , UpperCAmelCase_) == 1
)
@staticmethod
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : int = 1_4):
"""simple docstring"""
a : int = int(UpperCAmelCase_ , base=1_6)
a : str = int(UpperCAmelCase_ , base=1_6)
a : Any = primes[group]['prime']
if not DiffieHellman.is_valid_public_key_static(UpperCAmelCase_ , UpperCAmelCase_):
raise ValueError('Invalid public key')
a : Optional[Any] = pow(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
return shaaaa(str(UpperCAmelCase_).encode()).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
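# Toy walk-through of the exchange implemented above, with a tiny prime instead of the
# RFC 3526 groups (illustrative values only; the class itself hashes the shared value,
# apparently with SHA-256 given the hashlib import):
#
# >>> p, g = 23, 5 # public parameters
# >>> a_priv, b_priv = 6, 15 # each party's secret exponent
# >>> a_pub = pow(g, a_priv, p) # 8, sent to the peer
# >>> b_pub = pow(g, b_priv, p) # 19, sent to the peer
# >>> pow(b_pub, a_priv, p) == pow(a_pub, b_priv, p) == 2
# True
#
# The pow(key, (prime - 1) // 2, prime) == 1 test in the validity checks is a
# quadratic-residue (subgroup membership) check, which is what rejects maliciously
# chosen public keys for these safe-prime groups.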
| 345 | '''simple docstring'''
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[int]=1_3 , UpperCAmelCase_ : Dict=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : Any=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[Any]=True , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : Optional[Any]=9_9 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : int=3_2 , UpperCAmelCase_ : Tuple=5 , UpperCAmelCase_ : Any=4 , UpperCAmelCase_ : Tuple=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Optional[int]=5_1_2 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Optional[int]=4 , UpperCAmelCase_ : int="last" , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Tuple=None , UpperCAmelCase_ : List[str]=0 , ):
"""simple docstring"""
a : Tuple = parent
a : Optional[Any] = batch_size
a : Tuple = seq_length
a : Union[str, Any] = is_training
a : List[str] = use_input_lengths
a : Union[str, Any] = use_token_type_ids
a : Optional[int] = use_labels
a : int = gelu_activation
a : Dict = sinusoidal_embeddings
a : Any = causal
a : Optional[int] = asm
a : int = n_langs
a : List[str] = vocab_size
a : List[str] = n_special
a : List[str] = hidden_size
a : Any = num_hidden_layers
a : Union[str, Any] = num_attention_heads
a : Optional[Any] = hidden_dropout_prob
a : str = attention_probs_dropout_prob
a : Dict = max_position_embeddings
a : Union[str, Any] = type_sequence_label_size
a : str = initializer_range
a : List[Any] = num_labels
a : Union[str, Any] = num_choices
a : Optional[Any] = summary_type
a : Optional[Any] = use_proj
a : Optional[Any] = scope
a : Dict = bos_token_id
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
a : List[Any] = random_attention_mask([self.batch_size, self.seq_length])
a : Optional[int] = None
if self.use_input_lengths:
a : Optional[int] = (
ids_tensor([self.batch_size] , vocab_size=2) + self.seq_length - 2
) # small variation of seq_length
a : int = None
if self.use_token_type_ids:
a : str = ids_tensor([self.batch_size, self.seq_length] , self.n_langs)
a : Optional[Any] = None
a : Tuple = None
a : Optional[Any] = None
if self.use_labels:
a : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size)
a : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
a : Optional[Any] = ids_tensor([self.batch_size] , 2).float()
a : Dict = ids_tensor([self.batch_size] , self.num_choices)
a : Any = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : Any = XLMModel(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_ , lengths=UpperCAmelCase_ , langs=UpperCAmelCase_)
a : str = model(UpperCAmelCase_ , langs=UpperCAmelCase_)
a : int = model(UpperCAmelCase_)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[Any] , ):
"""simple docstring"""
a : Optional[Any] = XLMWithLMHeadModel(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = model(UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
a : Union[str, Any] = XLMForQuestionAnsweringSimple(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : List[str] = model(UpperCAmelCase_)
a : Tuple = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_)
a : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Any = XLMForQuestionAnswering(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : int = model(UpperCAmelCase_)
a : Dict = model(
UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , p_mask=UpperCAmelCase_ , )
a : int = model(
UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_ , cls_index=UpperCAmelCase_ , is_impossible=UpperCAmelCase_ , )
((a) , ) : Union[str, Any] = result_with_labels.to_tuple()
a : int = model(UpperCAmelCase_ , start_positions=UpperCAmelCase_ , end_positions=UpperCAmelCase_)
((a) , ) : Union[str, Any] = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , ())
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top))
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top))
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : int , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , ):
"""simple docstring"""
a : Dict = XLMForSequenceClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = model(UpperCAmelCase_)
a : Union[str, Any] = model(UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.loss.shape , ())
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any , ):
"""simple docstring"""
a : Dict = self.num_labels
a : int = XLMForTokenClassification(UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Optional[Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , labels=UpperCAmelCase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict , ):
"""simple docstring"""
a : str = self.num_choices
a : Dict = XLMForMultipleChoice(config=UpperCAmelCase_)
model.to(UpperCAmelCase_)
model.eval()
a : Union[str, Any] = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : str = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Optional[int] = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous()
a : Any = model(
UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , labels=UpperCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any]):
"""simple docstring"""
a : int = self.prepare_config_and_inputs()
( a , a , a , a , a , a , a , a , a ) : Union[str, Any] = config_and_inputs
a : Optional[int] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class UpperCamelCase ( a_ , a_ , a_ , unittest.TestCase ):
"""simple docstring"""
A : int = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
A : List[str] = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
A : Optional[Any] = (
{
"feature-extraction": XLMModel,
"fill-mask": XLMWithLMHeadModel,
"question-answering": XLMForQuestionAnsweringSimple,
"text-classification": XLMForSequenceClassification,
"text-generation": XLMWithLMHeadModel,
"token-classification": XLMForTokenClassification,
"zero-shot": XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any , UpperCAmelCase_ : Any):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast')
):
# `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict=False):
"""simple docstring"""
a : List[Any] = super()._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ , return_labels=UpperCAmelCase_)
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
a : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
a : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=UpperCAmelCase_)
return inputs_dict
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[Any] = XLMModelTester(self)
a : Tuple = ConfigTester(self , config_class=UpperCAmelCase_ , emb_dim=3_7)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
a : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Any):
"""simple docstring"""
a : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Optional[int]):
"""simple docstring"""
a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str]):
"""simple docstring"""
a : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=1):
"""simple docstring"""
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(
[isinstance(UpperCAmelCase_ , UpperCAmelCase_) for iter_attentions in attentions] , [True] * len(UpperCAmelCase_))
self.assertEqual(len(UpperCAmelCase_) , (max_length - min_length) * num_beam_groups)
for idx, iter_attentions in enumerate(UpperCAmelCase_):
# adds PAD dummy token
a : List[str] = min_length + idx + 1
a : Optional[Any] = min_length + idx + 1
a : Optional[int] = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str=False , UpperCAmelCase_ : Union[str, Any]=1):
"""simple docstring"""
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_)
self.assertListEqual(
[isinstance(UpperCAmelCase_ , UpperCAmelCase_) for iter_hidden_states in hidden_states] , [True] * len(UpperCAmelCase_) , )
self.assertEqual(len(UpperCAmelCase_) , (max_length - min_length) * num_beam_groups)
for idx, iter_hidden_states in enumerate(UpperCAmelCase_):
# adds PAD dummy token
a : int = min_length + idx + 1
a : Any = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(UpperCAmelCase_) , )
pass
@slow
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a : Union[str, Any] = XLMModel.from_pretrained(UpperCAmelCase_)
self.assertIsNotNone(UpperCAmelCase_)
@require_torch
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
a : Dict = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
model.to(UpperCAmelCase_)
a : List[Any] = torch.tensor([[1_4, 4_4_7]] , dtype=torch.long , device=UpperCAmelCase_) # the president
a : Dict = [
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
1_4,
4_4_7,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
a : Optional[Any] = model.generate(UpperCAmelCase_ , do_sample=UpperCAmelCase_)
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , UpperCAmelCase_)
| 345 | 1 |
'''simple docstring'''
from math import pow
def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int , snake_case : int , snake_case : int , snake_case : int , ) -> tuple[int, int]:
"""simple docstring"""
if current_sum == needed_sum:
# If the sum of the powers is equal to needed_sum, then we have a solution.
solutions_count += 1
return current_sum, solutions_count
a : Dict = int(pow(snake_case , snake_case ) )
if current_sum + i_to_n <= needed_sum:
# If the sum of the powers is less than needed_sum, then continue adding powers.
current_sum += i_to_n
a , a : List[str] = backtrack(
snake_case , snake_case , current_number + 1 , snake_case , snake_case )
current_sum -= i_to_n
if i_to_n < needed_sum:
# If the power of i is less than needed_sum, then try with the next power.
a , a : Union[str, Any] = backtrack(
snake_case , snake_case , current_number + 1 , snake_case , snake_case )
return current_sum, solutions_count
def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int ) -> int:
"""simple docstring"""
if not (1 <= needed_sum <= 1_000 and 2 <= power <= 10):
raise ValueError(
'Invalid input\n'
'needed_sum must be between 1 and 1000, power between 2 and 10.' )
return backtrack(snake_case , snake_case , 1 , 0 , 0 )[1] # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
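# Worked example (illustrative values): the backtracking above counts the ways to
# write needed_sum as a sum of the given power of distinct natural numbers. For
# needed_sum=13 and power=2 the only representation is 2**2 + 3**2, so the count is 1;
# for needed_sum=100 and power=2 there are three: 10**2, 6**2 + 8**2, and
# 1**2 + 3**2 + 4**2 + 5**2 + 7**2.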
| 345 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase : List[str] = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Any = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[int] = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 345 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase : List[str] = {"""processing_layoutxlm""": ["""LayoutXLMProcessor"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Any = ["""LayoutXLMTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[int] = ["""LayoutXLMTokenizerFast"""]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
UpperCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 345 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase : Tuple = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Union[str, Any] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
UpperCamelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 345 | 1 |
'''simple docstring'''
import math
import os
import sys
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> str:
"""simple docstring"""
a : int = ''
try:
with open(snake_case , 'rb' ) as binary_file:
a : Union[str, Any] = binary_file.read()
for dat in data:
a : Optional[int] = F"""{dat:08b}"""
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def SCREAMING_SNAKE_CASE__ ( snake_case : dict[str, str] , snake_case : str , snake_case : int , snake_case : str ) -> None:
"""simple docstring"""
lexicon.pop(snake_case )
a : str = last_match_id
if math.loga(snake_case ).is_integer():
for curr_key in lexicon:
a : Optional[Any] = '0' + lexicon[curr_key]
a : Optional[Any] = bin(snake_case )[2:]
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> str:
"""simple docstring"""
a : List[Any] = {'0': '0', '1': '1'}
a , a : Optional[Any] = '', ''
a : Dict = len(snake_case )
for i in range(len(snake_case ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
a : int = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(snake_case , snake_case , snake_case , snake_case )
index += 1
a : str = ''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
a : Dict = lexicon[curr_string]
result += last_match_id
return result
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str ) -> str:
"""simple docstring"""
a : List[Any] = os.path.getsize(snake_case )
a : List[str] = bin(snake_case )[2:]
a : Tuple = len(snake_case )
return "0" * (length_length - 1) + file_length_binary + compressed
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str ) -> None:
"""simple docstring"""
a : Union[str, Any] = 8
try:
with open(snake_case , 'wb' ) as opened_file:
a : Union[str, Any] = [
to_write[i : i + byte_length]
for i in range(0 , len(snake_case ) , snake_case )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(snake_case , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str ) -> None:
"""simple docstring"""
a : List[Any] = read_file_binary(snake_case )
a : Optional[Any] = compress_data(snake_case )
a : str = add_file_length(snake_case , snake_case )
write_file_binary(snake_case , snake_case )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
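# Sketch of the container format produced above (read off the code; the 5-byte figure
# below is only an example): compress_data emits a Lempel-Ziv-style stream of
# codewords over the input bit string, add_file_length prepends the original file size
# in binary preceded by (bit_length - 1) zero bits so a reader can tell where the
# length field ends, and write_file_binary packs the bit string into bytes, padding
# the tail with a single 1 bit followed by zeros (a whole '10000000' byte when the
# stream already ends on a byte boundary). For a 5-byte source file, bin(5) gives
# '101', so the prepended header is '00' + '101' = '00101'.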
| 345 | '''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : str = logging.get_logger(__name__)
UpperCamelCase : List[str] = {
"""facebook/data2vec-base-960h""": """https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json""",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Optional[int] = "data2vec-audio"
def __init__( self : Dict , UpperCAmelCase_ : Optional[int]=3_2 , UpperCAmelCase_ : Union[str, Any]=7_6_8 , UpperCAmelCase_ : Dict=1_2 , UpperCAmelCase_ : str=1_2 , UpperCAmelCase_ : Optional[Any]=3_0_7_2 , UpperCAmelCase_ : Dict="gelu" , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Optional[int]=0.1 , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Union[str, Any]=0.0 , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : Optional[Any]=0.02 , UpperCAmelCase_ : str=1e-5 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : Optional[Any]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCAmelCase_ : Dict=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase_ : int=(1_0, 3, 3, 3, 3, 2, 2) , UpperCAmelCase_ : List[Any]=False , UpperCAmelCase_ : List[Any]=1_6 , UpperCAmelCase_ : Optional[Any]=1_9 , UpperCAmelCase_ : int=5 , UpperCAmelCase_ : Any=0.05 , UpperCAmelCase_ : Dict=1_0 , UpperCAmelCase_ : Optional[Any]=2 , UpperCAmelCase_ : Optional[int]=0.0 , UpperCAmelCase_ : Tuple=1_0 , UpperCAmelCase_ : int=0 , UpperCAmelCase_ : Any="sum" , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Dict=False , UpperCAmelCase_ : Optional[int]=2_5_6 , UpperCAmelCase_ : Any=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCAmelCase_ : Optional[Any]=(5, 3, 3, 1, 1) , UpperCAmelCase_ : Optional[int]=(1, 2, 3, 1, 1) , UpperCAmelCase_ : int=5_1_2 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : Tuple=1 , UpperCAmelCase_ : Union[str, Any]=2 , UpperCAmelCase_ : List[str]=False , UpperCAmelCase_ : Any=3 , UpperCAmelCase_ : Dict=2 , UpperCAmelCase_ : int=3 , UpperCAmelCase_ : List[Any]=None , **UpperCAmelCase_ : int , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_ , pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_)
a : List[Any] = hidden_size
a : Any = feat_extract_activation
a : Any = list(UpperCAmelCase_)
a : Optional[int] = list(UpperCAmelCase_)
a : Dict = list(UpperCAmelCase_)
a : Tuple = conv_bias
a : str = num_conv_pos_embeddings
a : Dict = num_conv_pos_embedding_groups
a : Optional[Any] = conv_pos_kernel_size
a : Any = len(self.conv_dim)
a : Tuple = num_hidden_layers
a : Any = intermediate_size
a : Any = hidden_act
a : Dict = num_attention_heads
a : Dict = hidden_dropout
a : Union[str, Any] = attention_dropout
a : Dict = activation_dropout
a : Optional[int] = feat_proj_dropout
a : Tuple = final_dropout
a : Union[str, Any] = layerdrop
a : Tuple = layer_norm_eps
a : Dict = initializer_range
a : Tuple = vocab_size
a : int = use_weighted_layer_sum
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
a : List[str] = mask_time_prob
a : int = mask_time_length
a : Optional[int] = mask_time_min_masks
a : Dict = mask_feature_prob
a : List[str] = mask_feature_length
a : str = mask_feature_min_masks
# ctc loss
a : str = ctc_loss_reduction
a : Optional[Any] = ctc_zero_infinity
# adapter
a : List[str] = add_adapter
a : Optional[Any] = adapter_kernel_size
a : int = adapter_stride
a : str = num_adapter_layers
a : Optional[Any] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
a : str = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
a : List[Any] = list(UpperCAmelCase_)
a : List[str] = list(UpperCAmelCase_)
a : str = list(UpperCAmelCase_)
a : Optional[Any] = xvector_output_dim
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
return math.prod(self.conv_stride)
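# Worked arithmetic for the property above: with the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2) the product is 5 * 2**6 = 320, i.e. the convolutional feature
# encoder emits one frame for every 320 input samples, which corresponds to 20 ms of
# audio at a 16 kHz sampling rate.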
| 345 | 1 |
'''simple docstring'''
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnetaD
from diffusion import sampling
from torch import nn
from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel
UpperCamelCase : Optional[int] = {
"""gwf-440k""": {
"""url""": """https://model-server.zqevans2.workers.dev/gwf-440k.ckpt""",
"""sample_rate""": 48_000,
"""sample_size""": 65_536,
},
"""jmann-small-190k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt""",
"""sample_rate""": 48_000,
"""sample_size""": 65_536,
},
"""jmann-large-580k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt""",
"""sample_rate""": 48_000,
"""sample_size""": 131_072,
},
"""maestro-uncond-150k""": {
"""url""": """https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt""",
"""sample_rate""": 16_000,
"""sample_size""": 65_536,
},
"""unlocked-uncond-250k""": {
"""url""": """https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt""",
"""sample_rate""": 16_000,
"""sample_size""": 65_536,
},
"""honk-140k""": {
"""url""": """https://model-server.zqevans2.workers.dev/honk-140k.ckpt""",
"""sample_rate""": 16_000,
"""sample_size""": 65_536,
},
}
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Union[str, Any] ) -> int:
"""simple docstring"""
return torch.atana(snake_case , snake_case ) / math.pi * 2
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> Optional[int]:
"""simple docstring"""
a : int = torch.sin(t * math.pi / 2 ) ** 2
a : Any = (1 - sigma**2) ** 0.5
return alpha_sigma_to_t(snake_case , snake_case )
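# Reading of the two helpers above: for t in [0, 1], get_crash_schedule sets
# sigma = sin(pi * t / 2) ** 2 and alpha = sqrt(1 - sigma ** 2), then alpha_sigma_to_t
# maps that (alpha, sigma) pair back to a timestep via (2 / pi) * atan2(sigma, alpha).
# The linspace of timesteps built later in main() is therefore remapped onto the
# "crash" noise schedule that the original audio-diffusion sampler expects.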
class UpperCamelCase ( a_ ):
"""simple docstring"""
pass
class UpperCamelCase ( nn.Module ):
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase_ : int):
"""simple docstring"""
super().__init__()
a : int = DiffusionAttnUnetaD(UpperCAmelCase_ , n_attn_layers=4)
a : Union[str, Any] = deepcopy(self.diffusion)
a : Tuple = torch.quasirandom.SobolEngine(1 , scramble=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] ) -> List[str]:
"""simple docstring"""
a : Optional[Any] = MODELS_MAP[model_name]['url']
os.system(F"""wget {url} ./""" )
return F"""./{model_name}.ckpt"""
UpperCamelCase : Tuple = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
}
UpperCamelCase : str = {
"""8""": """resnets.0""",
"""9""": """attentions.0""",
"""10""": """resnets.1""",
"""11""": """attentions.1""",
"""12""": """resnets.2""",
"""13""": """attentions.2""",
}
UpperCamelCase : Optional[int] = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
"""8""": """resnets.3""",
"""9""": """attentions.3""",
"""10""": """resnets.4""",
"""11""": """attentions.4""",
"""12""": """resnets.5""",
"""13""": """attentions.5""",
}
UpperCamelCase : List[Any] = {
"""0""": """resnets.0""",
"""1""": """resnets.1""",
"""2""": """resnets.2""",
"""4""": """resnets.0""",
"""5""": """resnets.1""",
"""6""": """resnets.2""",
}
UpperCamelCase : Union[str, Any] = {
"""skip""": """conv_skip""",
"""main.0""": """conv_1""",
"""main.1""": """group_norm_1""",
"""main.3""": """conv_2""",
"""main.4""": """group_norm_2""",
}
UpperCamelCase : List[str] = {
"""norm""": """group_norm""",
"""qkv_proj""": ["""query""", """key""", """value"""],
"""out_proj""": ["""proj_attn"""],
}
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] ) -> List[str]:
"""simple docstring"""
if name.startswith('skip' ):
return name.replace('skip' , RES_CONV_MAP['skip'] )
# name has to be of format main.{digit}
if not name.startswith('main.' ):
raise ValueError(F"""ResConvBlock error with {name}""" )
return name.replace(name[:6] , RES_CONV_MAP[name[:6]] )
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] ) -> int:
"""simple docstring"""
for key, value in ATTN_MAP.items():
if name.startswith(snake_case ) and not isinstance(snake_case , snake_case ):
return name.replace(snake_case , snake_case )
elif name.startswith(snake_case ):
return [name.replace(snake_case , snake_case ) for v in value]
raise ValueError(F"""Attn error with {name}""" )
def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : Tuple=13 ) -> Any:
"""simple docstring"""
a : str = input_string
if string.split('.' )[0] == "timestep_embed":
return string.replace('timestep_embed' , 'time_proj' )
a : Dict = 0
if string.startswith('net.3.' ):
depth += 1
a : Any = string[6:]
elif string.startswith('net.' ):
a : Optional[int] = string[4:]
while string.startswith('main.7.' ):
depth += 1
a : str = string[7:]
if string.startswith('main.' ):
a : Optional[Any] = string[5:]
# mid block
if string[:2].isdigit():
a : Tuple = string[:2]
a : int = string[2:]
else:
a : Any = string[0]
a : str = string[1:]
if depth == max_depth:
a : str = MID_NUM_TO_LAYER[layer_num]
a : int = 'mid_block'
elif depth > 0 and int(snake_case ) < 7:
a : int = DOWN_NUM_TO_LAYER[layer_num]
a : Optional[Any] = F"""down_blocks.{depth}"""
elif depth > 0 and int(snake_case ) > 7:
a : Any = UP_NUM_TO_LAYER[layer_num]
a : Any = F"""up_blocks.{max_depth - depth - 1}"""
elif depth == 0:
a : List[str] = DEPTH_0_TO_LAYER[layer_num]
a : List[Any] = F"""up_blocks.{max_depth - 1}""" if int(snake_case ) > 3 else 'down_blocks.0'
if not string_left.startswith('.' ):
raise ValueError(F"""Naming error with {input_string} and string_left: {string_left}.""" )
a : Optional[int] = string_left[1:]
if "resnets" in new_layer:
a : int = convert_resconv_naming(snake_case )
elif "attentions" in new_layer:
a : Any = convert_attn_naming(snake_case )
a : int = new_string_left
if not isinstance(snake_case , snake_case ):
a : Tuple = prefix + '.' + new_layer + '.' + string_left
else:
a : List[Any] = [prefix + '.' + new_layer + '.' + s for s in string_left]
return new_string
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> Optional[Any]:
"""simple docstring"""
a : int = {}
for k, v in state_dict.items():
if k.endswith('kernel' ):
# up- and downsample layers don't have trainable weights
continue
a : List[Any] = rename(snake_case )
# check if we need to transform from Conv => Linear for attention
if isinstance(snake_case , snake_case ):
a : Any = transform_conv_attns(snake_case , snake_case , snake_case )
else:
a : Dict = v
return new_state_dict
def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Tuple , snake_case : Optional[int] ) -> str:
"""simple docstring"""
if len(snake_case ) == 1:
if len(v.shape ) == 3:
# weight
a : int = v[:, :, 0]
else:
# bias
a : List[str] = v
else:
# qkv matrices
a : Any = v.shape[0]
a : Tuple = trippled_shape // 3
for i in range(3 ):
if len(v.shape ) == 3:
a : Tuple = v[i * single_shape : (i + 1) * single_shape, :, 0]
else:
a : str = v[i * single_shape : (i + 1) * single_shape]
return new_state_dict
def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] ) -> Dict:
"""simple docstring"""
a : List[Any] = torch.device('cuda' if torch.cuda.is_available() else 'cpu' )
a : Any = args.model_path.split('/' )[-1].split('.' )[0]
if not os.path.isfile(args.model_path ):
assert (
model_name == args.model_path
), F"""Make sure to provide one of the official model names {MODELS_MAP.keys()}"""
a : Any = download(snake_case )
a : List[str] = MODELS_MAP[model_name]['sample_rate']
a : Optional[int] = MODELS_MAP[model_name]['sample_size']
a : Union[str, Any] = Object()
a : Any = sample_size
a : Dict = sample_rate
a : Dict = 0
a : List[str] = UNetaDModel(sample_size=snake_case , sample_rate=snake_case )
a : Any = diffusers_model.state_dict()
a : Tuple = DiffusionUncond(snake_case )
orig_model.load_state_dict(torch.load(args.model_path , map_location=snake_case )['state_dict'] )
a : int = orig_model.diffusion_ema.eval()
a : Tuple = orig_model.state_dict()
a : Optional[Any] = rename_orig_weights(snake_case )
a : Tuple = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() )
a : Tuple = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() )
assert len(snake_case ) == 0, F"""Problem with {renamed_minus_diffusers}"""
assert all(k.endswith('kernel' ) for k in list(snake_case ) ), F"""Problem with {diffusers_minus_renamed}"""
for key, value in renamed_state_dict.items():
assert (
diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
), F"""Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"""
if key == "time_proj.weight":
a : str = value.squeeze()
a : Optional[int] = value
diffusers_model.load_state_dict(snake_case )
a : Union[str, Any] = 100
a : Dict = 33
a : str = IPNDMScheduler(num_train_timesteps=snake_case )
a : List[Any] = torch.manual_seed(snake_case )
a : List[str] = torch.randn([1, 2, config.sample_size] , generator=snake_case ).to(snake_case )
a : int = torch.linspace(1 , 0 , steps + 1 , device=snake_case )[:-1]
a : Tuple = get_crash_schedule(snake_case )
a : Union[str, Any] = DanceDiffusionPipeline(unet=snake_case , scheduler=snake_case )
a : Optional[Any] = torch.manual_seed(33 )
a : Optional[Any] = pipe(num_inference_steps=snake_case , generator=snake_case ).audios
a : Union[str, Any] = sampling.iplms_sample(snake_case , snake_case , snake_case , {} )
a : Union[str, Any] = generated.clamp(-1 , 1 )
a : Tuple = (generated - audio).abs().sum()
a : str = (generated - audio).abs().max()
if args.save:
pipe.save_pretrained(args.checkpoint_path )
print('Diff sum' , snake_case )
print('Diff max' , snake_case )
assert diff_max < 1E-3, F"""Diff max: {diff_max} is too much :-/"""
print(F"""Conversion for {model_name} successful!""" )
if __name__ == "__main__":
UpperCamelCase : int = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
UpperCamelCase : int = parser.parse_args()
main(args)
| 345 | '''simple docstring'''
import logging
from transformers.configuration_utils import PretrainedConfig
UpperCamelCase : Optional[Any] = logging.getLogger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Tuple = "masked_bert"
def __init__( self : Tuple , UpperCAmelCase_ : List[Any]=3_0_5_2_2 , UpperCAmelCase_ : str=7_6_8 , UpperCAmelCase_ : Optional[Any]=1_2 , UpperCAmelCase_ : Optional[int]=1_2 , UpperCAmelCase_ : Union[str, Any]=3_0_7_2 , UpperCAmelCase_ : Union[str, Any]="gelu" , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Optional[int]=5_1_2 , UpperCAmelCase_ : List[str]=2 , UpperCAmelCase_ : str=0.02 , UpperCAmelCase_ : Optional[Any]=1e-12 , UpperCAmelCase_ : Dict=0 , UpperCAmelCase_ : Dict="topK" , UpperCAmelCase_ : str="constant" , UpperCAmelCase_ : Optional[Any]=0.0 , **UpperCAmelCase_ : Optional[Any] , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
a : Union[str, Any] = vocab_size
a : List[Any] = hidden_size
a : List[str] = num_hidden_layers
a : Any = num_attention_heads
a : Optional[Any] = hidden_act
a : str = intermediate_size
a : Dict = hidden_dropout_prob
a : Any = attention_probs_dropout_prob
a : Any = max_position_embeddings
a : Dict = type_vocab_size
a : List[str] = initializer_range
a : int = layer_norm_eps
a : Dict = pruning_method
a : List[str] = mask_init
a : Union[str, Any] = mask_scale
| 345 | 1 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( snake_case : list , snake_case : int , snake_case : int = 0 , snake_case : int = 0 ) -> int:
"""simple docstring"""
a : int = right or len(snake_case ) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(snake_case , snake_case , left + 1 , right - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
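# Illustrative trace: the function compares the key against both ends of the current
# window and recurses inward, so it is a linear-time two-pointer search. For
# list_data = [1, 0, 2, 5, 3] and key = 5: (left=0, right=4) matches neither end, the
# call recurses with (left=1, right=3), list_data[3] == 5, and 3 is returned; a
# missing key eventually makes left exceed right, and -1 is returned.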
| 345 | '''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Any):
"""simple docstring"""
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_)
a : str = {}
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : Tuple , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : int):
"""simple docstring"""
a : Dict = super().add_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
if num_added_tokens == 0:
raise ValueError(
f"""The tokenizer already contains the token {placeholder_token}. Please pass a different"""
' `placeholder_token` that is not already in the tokenizer.')
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , *UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : List[str]=1 , **UpperCAmelCase_ : Optional[int]):
"""simple docstring"""
a : Any = []
if num_vec_per_token == 1:
self.try_adding_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
output.append(UpperCAmelCase_)
else:
a : int = []
for i in range(UpperCAmelCase_):
a : Union[str, Any] = placeholder_token + f"""_{i}"""
self.try_adding_tokens(UpperCAmelCase_ , *UpperCAmelCase_ , **UpperCAmelCase_)
output.append(UpperCAmelCase_)
# handle cases where there is a new placeholder token that contains the current placeholder token but is larger
for token in self.token_map:
if token in placeholder_token:
raise ValueError(
f"""The tokenizer already has placeholder token {token} that can get confused with"""
f""" {placeholder_token}. Please keep placeholder tokens independent.""")
a : Any = output
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : str=1.0):
"""simple docstring"""
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
a : Any = []
for i in range(len(UpperCAmelCase_)):
output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=UpperCAmelCase_))
return output
for placeholder_token in self.token_map:
if placeholder_token in text:
a : List[Any] = self.token_map[placeholder_token]
a : int = tokens[: 1 + int(len(UpperCAmelCase_) * prop_tokens_to_load)]
if vector_shuffle:
a : List[Any] = copy.copy(UpperCAmelCase_)
random.shuffle(UpperCAmelCase_)
a : List[str] = text.replace(UpperCAmelCase_ , ' '.join(UpperCAmelCase_))
return text
def __call__( self : Optional[int] , UpperCAmelCase_ : Any , *UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Union[str, Any]=False , UpperCAmelCase_ : Optional[int]=1.0 , **UpperCAmelCase_ : str):
"""simple docstring"""
return super().__call__(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase_ , vector_shuffle=UpperCAmelCase_ , prop_tokens_to_load=UpperCAmelCase_) , *UpperCAmelCase_ , **UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ ( self : Dict , UpperCAmelCase_ : Optional[int] , *UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any=False , UpperCAmelCase_ : Optional[Any]=1.0 , **UpperCAmelCase_ : Dict):
"""simple docstring"""
return super().encode(
self.replace_placeholder_tokens_in_text(
UpperCAmelCase_ , vector_shuffle=UpperCAmelCase_ , prop_tokens_to_load=UpperCAmelCase_) , *UpperCAmelCase_ , **UpperCAmelCase_ , )
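# Usage sketch (the "<cat-toy>" placeholder below is only an example): registering a
# placeholder such as "<cat-toy>" with num_vec_per_token=4 adds the tokens
# "<cat-toy>_0" ... "<cat-toy>_3" to the vocabulary and records them in token_map, and
# any prompt passed through __call__ or encode has the placeholder expanded in place
# (optionally shuffled when vector_shuffle=True, or truncated via
# prop_tokens_to_load), e.g. "A photo of <cat-toy>" becomes
# "A photo of <cat-toy>_0 <cat-toy>_1 <cat-toy>_2 <cat-toy>_3".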
| 345 | 1 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int ) -> bool:
"""simple docstring"""
return numa ^ numa < 0
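# As written, both operands of the XOR above share one name, so the comparison cannot
# distinguish the two inputs. The intended check: two integers have different signs
# exactly when the XOR of the two values is negative, because the sign bits XOR to 1
# only when they differ. For example, (-5) ^ 3 == -8 < 0 is True, while
# 4 ^ 9 == 13 >= 0 is False.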
if __name__ == "__main__":
import doctest
doctest.testmod()
| 345 | '''simple docstring'''
import argparse
import json
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinConfig, SwinForImageClassification
def SCREAMING_SNAKE_CASE__ ( snake_case : str ) -> Optional[Any]:
"""simple docstring"""
a : Union[str, Any] = SwinConfig()
a : Optional[int] = swin_name.split('_' )
a : Union[str, Any] = name_split[1]
a : Dict = int(name_split[4] )
a : Union[str, Any] = int(name_split[3][-1] )
if model_size == "tiny":
a : Optional[Any] = 96
a : Any = (2, 2, 6, 2)
a : List[str] = (3, 6, 12, 24)
elif model_size == "small":
a : int = 96
a : List[str] = (2, 2, 18, 2)
a : int = (3, 6, 12, 24)
elif model_size == "base":
a : Tuple = 128
a : Optional[int] = (2, 2, 18, 2)
a : List[Any] = (4, 8, 16, 32)
else:
a : Dict = 192
a : str = (2, 2, 18, 2)
a : List[Any] = (6, 12, 24, 48)
if "in22k" in swin_name:
a : Any = 21_841
else:
a : str = 1_000
a : str = 'huggingface/label-files'
a : Optional[Any] = 'imagenet-1k-id2label.json'
a : Dict = json.load(open(hf_hub_download(snake_case , snake_case , repo_type='dataset' ) , 'r' ) )
a : Tuple = {int(snake_case ): v for k, v in idalabel.items()}
a : int = idalabel
a : str = {v: k for k, v in idalabel.items()}
a : Dict = img_size
a : List[Any] = num_classes
a : str = embed_dim
a : Dict = depths
a : Union[str, Any] = num_heads
a : int = window_size
return config
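# Worked example of the name parsing above: for a timm checkpoint called
# "swin_base_patch4_window7_224", name_split[1] yields the model size "base",
# name_split[3][-1] yields window_size 7, and name_split[4] yields img_size 224; a
# name containing "in22k" switches the head to the 21_841 ImageNet-22k labels,
# otherwise the 1_000 ImageNet-1k labels are used.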
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] ) -> Optional[int]:
"""simple docstring"""
if "patch_embed.proj" in name:
a : int = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
a : Tuple = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
a : Optional[int] = 'encoder.' + name
if "attn.proj" in name:
a : List[Any] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
a : Tuple = name.replace('attn' , 'attention.self' )
if "norm1" in name:
a : Optional[int] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
a : Dict = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
a : Union[str, Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
a : Any = name.replace('mlp.fc2' , 'output.dense' )
if name == "norm.weight":
a : Union[str, Any] = 'layernorm.weight'
if name == "norm.bias":
a : List[str] = 'layernorm.bias'
if "head" in name:
a : Union[str, Any] = name.replace('head' , 'classifier' )
else:
a : List[Any] = 'swin.' + name
return name
def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple ) -> List[str]:
"""simple docstring"""
for key in orig_state_dict.copy().keys():
a : Any = orig_state_dict.pop(snake_case )
if "mask" in key:
continue
elif "qkv" in key:
a : Optional[Any] = key.split('.' )
a : Dict = int(key_split[1] )
a : Optional[int] = int(key_split[3] )
a : Tuple = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
a : Optional[Any] = val[:dim, :]
a : List[Any] = val[
dim : dim * 2, :
]
a : List[Any] = val[-dim:, :]
else:
a : Dict = val[
:dim
]
a : Union[str, Any] = val[
dim : dim * 2
]
a : Union[str, Any] = val[
-dim:
]
else:
a : Dict = val
return orig_state_dict
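# Why the qkv branch above slices tensors into thirds (inferred from the shapes
# involved): timm stores each window-attention projection as one fused linear layer
# whose weight has shape (3 * dim, dim) and whose bias has length 3 * dim, while the
# Hugging Face implementation keeps separate query/key/value parameters of shape
# (dim, dim). Taking consecutive blocks of `dim` rows therefore recovers the query,
# key and value weights in that order.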
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : Dict ) -> List[str]:
"""simple docstring"""
a : Any = timm.create_model(snake_case , pretrained=snake_case )
timm_model.eval()
a : str = get_swin_config(snake_case )
a : Optional[int] = SwinForImageClassification(snake_case )
model.eval()
a : Union[str, Any] = convert_state_dict(timm_model.state_dict() , snake_case )
model.load_state_dict(snake_case )
a : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
a : Optional[Any] = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swin_name.replace('_' , '-' ) ) )
a : str = Image.open(requests.get(snake_case , stream=snake_case ).raw )
a : Union[str, Any] = image_processor(images=snake_case , return_tensors='pt' )
a : int = timm_model(inputs['pixel_values'] )
a : Optional[int] = model(**snake_case ).logits
assert torch.allclose(snake_case , snake_case , atol=1E-3 )
print(F"""Saving model {swin_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case )
if __name__ == "__main__":
UpperCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swin_name""",
default="""swin_tiny_patch4_window7_224""",
type=str,
help="""Name of the Swin timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
UpperCamelCase : Optional[Any] = parser.parse_args()
convert_swin_checkpoint(args.swin_name, args.pytorch_dump_folder_path)
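# Illustrative usage (not part of the original script): the same conversion driven from the
# shell; the script filename and the output directory below are made-up assumptions, while
# `convert_swin_checkpoint` is the name the argparse block above already calls.
#   python convert_swin_timm_to_pytorch.py \
#       --swin_name swin_tiny_patch4_window7_224 \
#       --pytorch_dump_folder_path ./swin-tiny-patch4-window7-224
# convert_swin_checkpoint("swin_tiny_patch4_window7_224", "./swin-tiny-patch4-window7-224")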
| 345 | 1 |
'''simple docstring'''
import copy
from typing import Dict, List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
UpperCamelCase : int = {
"""facebook/mask2former-swin-small-coco-instance""": (
"""https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json"""
)
# See all Mask2Former models at https://huggingface.co/models?filter=mask2former
}
UpperCamelCase : List[str] = logging.get_logger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Optional[Any] = "mask2former"
A : str = ["swin"]
A : List[Any] = {"hidden_size": "hidden_dim"}
def __init__( self : Any , UpperCAmelCase_ : Optional[Dict] = None , UpperCAmelCase_ : int = 2_5_6 , UpperCAmelCase_ : int = 2_5_6 , UpperCAmelCase_ : int = 2_5_6 , UpperCAmelCase_ : int = 1_0_2_4 , UpperCAmelCase_ : str = "relu" , UpperCAmelCase_ : int = 6 , UpperCAmelCase_ : int = 1_0 , UpperCAmelCase_ : int = 8 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : int = 2_0_4_8 , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : int = 4 , UpperCAmelCase_ : int = 2_5_5 , UpperCAmelCase_ : int = 1_0_0 , UpperCAmelCase_ : float = 0.1 , UpperCAmelCase_ : float = 2.0 , UpperCAmelCase_ : float = 5.0 , UpperCAmelCase_ : float = 5.0 , UpperCAmelCase_ : int = 1_2_5_4_4 , UpperCAmelCase_ : float = 3.0 , UpperCAmelCase_ : float = 0.75 , UpperCAmelCase_ : float = 0.02 , UpperCAmelCase_ : float = 1.0 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : List[int] = [4, 8, 1_6, 3_2] , UpperCAmelCase_ : bool = None , **UpperCAmelCase_ : Tuple , ):
"""simple docstring"""
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.')
a : Any = CONFIG_MAPPING['swin'](
image_size=2_2_4 , in_channels=3 , patch_size=4 , embed_dim=9_6 , depths=[2, 2, 1_8, 2] , num_heads=[3, 6, 1_2, 2_4] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=UpperCAmelCase_ , out_features=['stage1', 'stage2', 'stage3', 'stage4'] , )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
a : Dict = backbone_config.pop('model_type')
a : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
a : Union[str, Any] = config_class.from_dict(UpperCAmelCase_)
# verify that the backbone is supported
if backbone_config.model_type not in self.backbones_supported:
logger.warning_once(
f"""Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. """
f"""Supported model types: {','.join(self.backbones_supported)}""")
a : Optional[int] = backbone_config
a : Dict = feature_size
a : Union[str, Any] = mask_feature_size
a : Tuple = hidden_dim
a : int = encoder_feedforward_dim
a : List[Any] = activation_function
a : Dict = encoder_layers
a : int = decoder_layers
a : Any = num_attention_heads
a : Union[str, Any] = dropout
a : Tuple = dim_feedforward
a : List[Any] = pre_norm
a : List[Any] = enforce_input_projection
a : Tuple = common_stride
a : Union[str, Any] = ignore_value
a : Tuple = num_queries
a : Any = no_object_weight
a : int = class_weight
a : Dict = mask_weight
a : Union[str, Any] = dice_weight
a : Tuple = train_num_points
a : Any = oversample_ratio
a : Union[str, Any] = importance_sample_ratio
a : Dict = init_std
a : List[str] = init_xavier_std
a : str = use_auxiliary_loss
a : str = feature_strides
a : Optional[int] = output_auxiliary_logits
a : Tuple = decoder_layers
super().__init__(**UpperCAmelCase_)
@classmethod
def SCREAMING_SNAKE_CASE_ ( cls : int , UpperCAmelCase_ : PretrainedConfig , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
return cls(
backbone_config=UpperCAmelCase_ , **UpperCAmelCase_ , )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
a : Optional[int] = copy.deepcopy(self.__dict__)
a : Optional[Any] = self.backbone_config.to_dict()
a : Tuple = self.__class__.model_type
return output
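# Illustrative sketch (not part of the original file): the upstream transformers class this
# definition corresponds to is Mask2FormerConfig, so the import below and the use of SwinConfig
# for the backbone are assumptions about that public API rather than code from this file.
from transformers import Mask2FormerConfig, SwinConfig
backbone = SwinConfig(embed_dim=96, depths=[2, 2, 18, 2], num_heads=[3, 6, 12, 24], window_size=7)
config = Mask2FormerConfig(backbone_config=backbone, num_queries=100)
print(config.to_dict()["backbone_config"]["model_type"])  # expected: "swin"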
| 345 | '''simple docstring'''
import base64
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class UpperCamelCase :
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase_ : Tuple):
"""simple docstring"""
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
            # Don't modify the user's data should they want to reuse it (e.g. in tests), because once we've
            # modified it, it will not be accepted here again, since `auto` values would have been overridden
a : Dict = deepcopy(UpperCAmelCase_)
elif os.path.exists(UpperCAmelCase_):
with io.open(UpperCAmelCase_ , 'r' , encoding='utf-8') as f:
a : Union[str, Any] = json.load(UpperCAmelCase_)
else:
try:
                a : Union[str, Any] = base64.urlsafe_b64decode(UpperCAmelCase_).decode('utf-8')
a : List[str] = json.loads(UpperCAmelCase_)
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""")
a : Optional[int] = config
self.set_stage_and_offload()
def SCREAMING_SNAKE_CASE_ ( self : List[Any]):
"""simple docstring"""
a : str = self.get_value('zero_optimization.stage' , -1)
# offload
a : Any = False
if self.is_zeroa() or self.is_zeroa():
a : Tuple = set(['cpu', 'nvme'])
a : int = set(
[
self.get_value('zero_optimization.offload_optimizer.device'),
self.get_value('zero_optimization.offload_param.device'),
])
if len(offload_devices & offload_devices_valid) > 0:
a : List[str] = True
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , UpperCAmelCase_ : Dict):
"""simple docstring"""
a : List[str] = self.config
# find the config node of interest if it exists
a : int = ds_key_long.split('.')
a : Union[str, Any] = nodes.pop()
for node in nodes:
a : Union[str, Any] = config.get(UpperCAmelCase_)
if config is None:
return None, ds_key
return config, ds_key
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : int=None):
"""simple docstring"""
a , a : int = self.find_config_node(UpperCAmelCase_)
if config is None:
return default
return config.get(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any=False):
"""simple docstring"""
a : Any = self.config
# find the config node of interest if it exists
a : Optional[Any] = ds_key_long.split('.')
for node in nodes:
a : List[str] = config
a : int = config.get(UpperCAmelCase_)
if config is None:
if must_exist:
raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""")
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : str):
"""simple docstring"""
a : List[str] = self.get_value(UpperCAmelCase_)
return False if value is None else bool(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : List[Any] = self.get_value(UpperCAmelCase_)
return False if value is None else not bool(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return self._stage == 2
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any]):
"""simple docstring"""
return self._stage == 3
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
return self._offload
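# Illustrative sketch (not part of the original file): the dotted-key lookup the config class
# above implements, restated as a standalone helper on a plain dict. All names here are made
# up for the example.
def _lookup_ds_key(config: dict, ds_key_long: str, default=None):
    *parents, leaf = ds_key_long.split(".")
    node = config
    for key in parents:
        node = node.get(key)
        if node is None:
            return default
    return node.get(leaf, default)
# e.g. _lookup_ds_key({"zero_optimization": {"stage": 3}}, "zero_optimization.stage") -> 3
#      _lookup_ds_key({}, "zero_optimization.offload_param.device", "none") -> "none"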
class UpperCamelCase :
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : int):
"""simple docstring"""
a : Union[str, Any] = engine
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
self.engine.backward(UpperCAmelCase_ , **UpperCAmelCase_)
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
        # This plugin overrides the above calls with no-ops when Accelerate runs under
        # DeepSpeed, but allows normal functionality for non-DeepSpeed cases, thus enabling a simple
        # training loop that works transparently under many training regimes.
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : Any):
"""simple docstring"""
super().__init__(UpperCAmelCase_ , device_placement=UpperCAmelCase_ , scaler=UpperCAmelCase_)
a : List[str] = hasattr(self.optimizer , 'overflow')
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : Dict=None):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def SCREAMING_SNAKE_CASE_ ( self : Dict):
"""simple docstring"""
if self.__has_overflow__:
return self.optimizer.overflow
return False
class UpperCamelCase ( a_ ):
"""simple docstring"""
def __init__( self : str , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
super().__init__(UpperCAmelCase_ , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : int):
"""simple docstring"""
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class UpperCamelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int]=0.0_01 , UpperCAmelCase_ : List[Any]=0 , **UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : int = params
a : str = lr
a : Tuple = weight_decay
a : Dict = kwargs
class UpperCamelCase :
"""simple docstring"""
def __init__( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : Union[str, Any]=0 , **UpperCAmelCase_ : List[Any]):
"""simple docstring"""
a : str = optimizer
a : Tuple = total_num_steps
a : Optional[Any] = warmup_num_steps
a : List[str] = kwargs
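# Illustrative sketch (not part of the original file): the kind of training loop the wrappers
# above are designed to keep unchanged whether or not DeepSpeed is active. The objects passed
# in are assumed to come from `accelerator.prepare(...)`; the function name is made up.
def train_one_epoch(accelerator, model, optimizer, scheduler, dataloader):
    model.train()
    for batch in dataloader:
        loss = model(**batch).loss
        accelerator.backward(loss)  # under DeepSpeed this also performs the engine step
        optimizer.step()            # no-op when the DeepSpeed engine has already stepped
        scheduler.step()            # no-op when DeepSpeed drives the LR scheduler
        optimizer.zero_grad()       # no-op for the same reason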
| 345 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase : Optional[int] = {
"""configuration_distilbert""": [
"""DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""DistilBertConfig""",
"""DistilBertOnnxConfig""",
],
"""tokenization_distilbert""": ["""DistilBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Dict = ["""DistilBertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Optional[Any] = [
"""DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DistilBertForMaskedLM""",
"""DistilBertForMultipleChoice""",
"""DistilBertForQuestionAnswering""",
"""DistilBertForSequenceClassification""",
"""DistilBertForTokenClassification""",
"""DistilBertModel""",
"""DistilBertPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : List[Any] = [
"""TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDistilBertForMaskedLM""",
"""TFDistilBertForMultipleChoice""",
"""TFDistilBertForQuestionAnswering""",
"""TFDistilBertForSequenceClassification""",
"""TFDistilBertForTokenClassification""",
"""TFDistilBertMainLayer""",
"""TFDistilBertModel""",
"""TFDistilBertPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase : Union[str, Any] = [
"""FlaxDistilBertForMaskedLM""",
"""FlaxDistilBertForMultipleChoice""",
"""FlaxDistilBertForQuestionAnswering""",
"""FlaxDistilBertForSequenceClassification""",
"""FlaxDistilBertForTokenClassification""",
"""FlaxDistilBertModel""",
"""FlaxDistilBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
UpperCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
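# Illustrative sketch (not part of the original file): a minimal stand-in showing the idea
# behind `_LazyModule` above -- names listed in the import structure are only imported the
# first time they are accessed. Class and variable names here are made up for the example.
import importlib
import types
class _MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f".{submodule}", self.__name__)
                return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")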
| 345 | '''simple docstring'''
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
UpperCamelCase : List[str] = logging.get_logger(__name__)
@dataclass
class UpperCamelCase :
"""simple docstring"""
A : str = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
A : str = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
A : int = field(
default=128 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
A : bool = field(
default=a_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
a : Union[str, Any] = self.task_name.lower()
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : int = "train"
A : Tuple = "dev"
A : List[Any] = "test"
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : GlueDataTrainingArguments
A : str
A : List[InputFeatures]
def __init__( self : Tuple , UpperCAmelCase_ : GlueDataTrainingArguments , UpperCAmelCase_ : PreTrainedTokenizerBase , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Union[str, Split] = Split.train , UpperCAmelCase_ : Optional[str] = None , ):
"""simple docstring"""
warnings.warn(
'This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets '
'library. You can have a look at this example script for pointers: '
'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py' , UpperCAmelCase_ , )
a : Dict = args
a : int = glue_processors[args.task_name]()
a : int = glue_output_modes[args.task_name]
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
try:
a : str = Split[mode]
except KeyError:
raise KeyError('mode is not a valid split name')
# Load data features from cache or dataset file
a : List[str] = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , )
a : Tuple = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
a , a : str = label_list[2], label_list[1]
a : int = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
a : Union[str, Any] = cached_features_file + '.lock'
with FileLock(UpperCAmelCase_):
if os.path.exists(UpperCAmelCase_) and not args.overwrite_cache:
a : Optional[Any] = time.time()
a : Optional[Any] = torch.load(UpperCAmelCase_)
logger.info(
f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start)
else:
logger.info(f"""Creating features from dataset file at {args.data_dir}""")
if mode == Split.dev:
a : List[Any] = self.processor.get_dev_examples(args.data_dir)
elif mode == Split.test:
a : Optional[Any] = self.processor.get_test_examples(args.data_dir)
else:
a : List[str] = self.processor.get_train_examples(args.data_dir)
if limit_length is not None:
a : Dict = examples[:limit_length]
a : List[Any] = glue_convert_examples_to_features(
UpperCAmelCase_ , UpperCAmelCase_ , max_length=args.max_seq_length , label_list=UpperCAmelCase_ , output_mode=self.output_mode , )
a : Dict = time.time()
torch.save(self.features , UpperCAmelCase_)
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""")
def __len__( self : Tuple):
"""simple docstring"""
return len(self.features)
def __getitem__( self : Optional[int] , UpperCAmelCase_ : List[str]):
"""simple docstring"""
return self.features[i]
def SCREAMING_SNAKE_CASE_ ( self : str):
"""simple docstring"""
return self.label_list
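# Illustrative sketch (not part of the original file): building the dataset above for MRPC.
# GlueDataTrainingArguments and GlueDataset are the names older transformers releases export
# for these classes; the data directory is a made-up path and the tokenizer checkpoint is an
# assumption.
from transformers import AutoTokenizer, GlueDataset, GlueDataTrainingArguments
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
data_args = GlueDataTrainingArguments(
    task_name="mrpc", data_dir="./glue_data/MRPC", max_seq_length=128, overwrite_cache=False
)
train_dataset = GlueDataset(data_args, tokenizer=tokenizer)
print(len(train_dataset), train_dataset.get_labels())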
| 345 | 1 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
UpperCamelCase : Tuple = """\
@inproceedings{Rajpurkar2016SQuAD10,
title={SQuAD: 100, 000+ Questions for Machine Comprehension of Text},
author={Pranav Rajpurkar and Jian Zhang and Konstantin Lopyrev and Percy Liang},
booktitle={EMNLP},
year={2016}
}
"""
UpperCamelCase : Tuple = """
This metric wraps the official scoring script for version 1 of the Stanford Question Answering Dataset (SQuAD).
Stanford Question Answering Dataset (SQuAD) is a reading comprehension dataset, consisting of questions posed by
crowdworkers on a set of Wikipedia articles, where the answer to every question is a segment of text, or span,
from the corresponding reading passage, or the question might be unanswerable.
"""
UpperCamelCase : Union[str, Any] = """
Computes SQuAD scores (F1 and EM).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': the text of the answer
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the SQuAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
Examples:
>>> predictions = [{'prediction_text': '1976', 'id': '56e10a3be3433e1400422b22'}]
>>> references = [{'answers': {'answer_start': [97], 'text': ['1976']}, 'id': '56e10a3be3433e1400422b22'}]
>>> squad_metric = datasets.load_metric(\"squad\")
>>> results = squad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCamelCase ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self : Tuple):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': {'id': datasets.Value('string'), 'prediction_text': datasets.Value('string')},
'references': {
'id': datasets.Value('string'),
'answers': datasets.features.Sequence(
{
'text': datasets.Value('string'),
'answer_start': datasets.Value('int32'),
}),
},
}) , codebase_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , reference_urls=['https://rajpurkar.github.io/SQuAD-explorer/'] , )
def SCREAMING_SNAKE_CASE_ ( self : Any , UpperCAmelCase_ : Dict , UpperCAmelCase_ : Union[str, Any]):
"""simple docstring"""
a : Optional[int] = {prediction['id']: prediction['prediction_text'] for prediction in predictions}
a : List[str] = [
{
'paragraphs': [
{
'qas': [
{
'answers': [{'text': answer_text} for answer_text in ref['answers']['text']],
'id': ref['id'],
}
for ref in references
]
}
]
}
]
a : List[str] = evaluate(dataset=UpperCAmelCase_ , predictions=UpperCAmelCase_)
return score
| 345 | '''simple docstring'''
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
UpperCamelCase : Dict = logging.get_logger(__name__)
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Any = ["pixel_values"]
def __init__( self : str , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Dict[str, int]] = None , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
super().__init__(**UpperCAmelCase_)
a : str = size if size is not None else {'shortest_edge': 2_5_6}
a : Dict = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = crop_size if crop_size is not None else {'height': 2_2_4, 'width': 2_2_4}
a : int = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : Any = do_resize
a : List[str] = size
a : Union[str, Any] = resample
a : int = do_center_crop
a : Optional[int] = crop_size
a : Tuple = do_rescale
a : int = rescale_factor
a : Optional[Any] = do_normalize
a : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
a : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
a : Optional[int] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
a : Union[str, Any] = get_resize_output_image_size(UpperCAmelCase_ , size=size['shortest_edge'] , default_to_square=UpperCAmelCase_)
return resize(UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Dict[str, int] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
a : List[str] = get_size_dict(UpperCAmelCase_)
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""")
return center_crop(UpperCAmelCase_ , size=(size['height'], size['width']) , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : float , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[Any]):
"""simple docstring"""
return rescale(UpperCAmelCase_ , scale=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : Tuple , UpperCAmelCase_ : np.ndarray , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Union[float, List[float]] , UpperCAmelCase_ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase_ : Optional[int] , ):
"""simple docstring"""
return normalize(UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_ , data_format=UpperCAmelCase_ , **UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : str , UpperCAmelCase_ : ImageInput , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : PILImageResampling = None , UpperCAmelCase_ : bool = None , UpperCAmelCase_ : Dict[str, int] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[float] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[float, List[float]]] = None , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , UpperCAmelCase_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCAmelCase_ : List[str] , ):
"""simple docstring"""
a : int = do_resize if do_resize is not None else self.do_resize
a : int = size if size is not None else self.size
a : Union[str, Any] = get_size_dict(UpperCAmelCase_ , default_to_square=UpperCAmelCase_)
a : str = resample if resample is not None else self.resample
a : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop
a : Union[str, Any] = crop_size if crop_size is not None else self.crop_size
a : Dict = get_size_dict(UpperCAmelCase_ , param_name='crop_size')
a : str = do_rescale if do_rescale is not None else self.do_rescale
a : int = rescale_factor if rescale_factor is not None else self.rescale_factor
a : str = do_normalize if do_normalize is not None else self.do_normalize
a : List[str] = image_mean if image_mean is not None else self.image_mean
a : Optional[int] = image_std if image_std is not None else self.image_std
a : Dict = make_list_of_images(UpperCAmelCase_)
if not valid_images(UpperCAmelCase_):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.')
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.')
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.')
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.')
# All transformations expect numpy arrays.
a : List[Any] = [to_numpy_array(UpperCAmelCase_) for image in images]
if do_resize:
a : Dict = [self.resize(image=UpperCAmelCase_ , size=UpperCAmelCase_ , resample=UpperCAmelCase_) for image in images]
if do_center_crop:
a : Any = [self.center_crop(image=UpperCAmelCase_ , size=UpperCAmelCase_) for image in images]
if do_rescale:
a : Optional[int] = [self.rescale(image=UpperCAmelCase_ , scale=UpperCAmelCase_) for image in images]
if do_normalize:
a : Dict = [self.normalize(image=UpperCAmelCase_ , mean=UpperCAmelCase_ , std=UpperCAmelCase_) for image in images]
a : List[Any] = [to_channel_dimension_format(UpperCAmelCase_ , UpperCAmelCase_) for image in images]
a : List[str] = {'pixel_values': images}
return BatchFeature(data=UpperCAmelCase_ , tensor_type=UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ ( self : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Tuple] = None):
"""simple docstring"""
a : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCAmelCase_) != len(UpperCAmelCase_):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits')
if is_torch_tensor(UpperCAmelCase_):
a : Optional[Any] = target_sizes.numpy()
a : List[str] = []
for idx in range(len(UpperCAmelCase_)):
a : Optional[Any] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0) , size=target_sizes[idx] , mode='bilinear' , align_corners=UpperCAmelCase_)
a : Union[str, Any] = resized_logits[0].argmax(dim=0)
semantic_segmentation.append(UpperCAmelCase_)
else:
a : Optional[int] = logits.argmax(dim=1)
a : List[str] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
return semantic_segmentation
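# Illustrative sketch (not part of the original file): how an image processor with this
# interface is typically used. The checkpoint name and the image path are assumptions, and
# loading through AutoImageProcessor assumes the class above is registered under its upstream
# (un-renamed) name.
from PIL import Image
from transformers import AutoImageProcessor
image_processor = AutoImageProcessor.from_pretrained("microsoft/beit-base-patch16-224")
image = Image.open("example.jpg")  # made-up path
inputs = image_processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # e.g. torch.Size([1, 3, 224, 224])
# For segmentation checkpoints, the post-processing method above turns model outputs back into
# per-pixel class maps:
# segmentation = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])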
| 345 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase : Any = logging.get_logger(__name__)
UpperCamelCase : List[str] = {
"""studio-ousia/luke-base""": """https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json""",
"""studio-ousia/luke-large""": """https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json""",
}
class UpperCamelCase ( a_ ):
"""simple docstring"""
A : Any = "luke"
def __init__( self : Any , UpperCAmelCase_ : Tuple=5_0_2_6_7 , UpperCAmelCase_ : List[str]=5_0_0_0_0_0 , UpperCAmelCase_ : Optional[int]=7_6_8 , UpperCAmelCase_ : Any=2_5_6 , UpperCAmelCase_ : List[Any]=1_2 , UpperCAmelCase_ : Tuple=1_2 , UpperCAmelCase_ : Any=3_0_7_2 , UpperCAmelCase_ : str="gelu" , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : List[str]=0.1 , UpperCAmelCase_ : List[str]=5_1_2 , UpperCAmelCase_ : Any=2 , UpperCAmelCase_ : Any=0.02 , UpperCAmelCase_ : str=1e-12 , UpperCAmelCase_ : Tuple=True , UpperCAmelCase_ : List[str]=None , UpperCAmelCase_ : List[str]=1 , UpperCAmelCase_ : Dict=0 , UpperCAmelCase_ : List[Any]=2 , **UpperCAmelCase_ : Dict , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
a : int = vocab_size
a : Tuple = entity_vocab_size
a : Optional[Any] = hidden_size
a : Tuple = entity_emb_size
a : Any = num_hidden_layers
a : Dict = num_attention_heads
a : Any = hidden_act
a : Union[str, Any] = intermediate_size
a : Union[str, Any] = hidden_dropout_prob
a : Union[str, Any] = attention_probs_dropout_prob
a : Dict = max_position_embeddings
a : Any = type_vocab_size
a : int = initializer_range
a : Tuple = layer_norm_eps
a : int = use_entity_aware_attention
a : Optional[Any] = classifier_dropout
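# Illustrative sketch (not part of the original file): instantiating the configuration above
# through its upstream transformers name, LukeConfig; treat the import as an assumption about
# that public API.
from transformers import LukeConfig
config = LukeConfig(entity_vocab_size=500_000, use_entity_aware_attention=True)
print(config.hidden_size, config.entity_emb_size)  # defaults from the signature above: 768 and 256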
| 345 | '''simple docstring'''
from __future__ import annotations
def SCREAMING_SNAKE_CASE__ ( snake_case : int | float | str , snake_case : int | float | str ) -> list[str]:
"""simple docstring"""
if nth_term == "":
return [""]
a : Dict = int(snake_case )
a : Optional[int] = int(snake_case )
a : list[str] = []
for temp in range(int(snake_case ) ):
series.append(F"""1 / {pow(temp + 1 , int(snake_case ) )}""" if series else '1' )
return series
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase : Optional[int] = int(input("""Enter the last number (nth term) of the P-Series"""))
UpperCamelCase : List[Any] = int(input("""Enter the power for P-Series"""))
print("""Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p""")
print(p_series(nth_term, power))
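# Illustrative check (not part of the original file): the intended output of the series builder
# above for nth_term = 5 and power = 2, reproduced inline.
expected_terms = ["1"] + [f"1 / {pow(n, 2)}" for n in range(2, 6)]
print(expected_terms)  # ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']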
| 345 | 1 |