code (stringlengths 82-53.2k) | code_codestyle (int64 0-721) | style_context (stringlengths 91-41.9k) | style_context_codestyle (int64 0-699) | label (int64 0-1) |
---|---|---|---|---|
from functools import lru_cache


def unique_prime_factors(n: int) -> set:
    """Find the unique prime factors of an integer."""
    i = 2
    factors = set()
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.add(i)
    if n > 1:
        factors.add(n)
    return factors


@lru_cache
def upf_len(num: int) -> int:
    """Memoize the number of unique prime factors for a given value."""
    return len(unique_prime_factors(num))


def equality(iterable: list) -> bool:
    """Check whether all items of an iterable are equal."""
    return len(set(iterable)) in (0, 1)


def run(n: int) -> list:
    """Find the first n consecutive integers that each have n distinct prime factors."""
    base = 2
    while True:
        # Increment each value of a generated range
        group = [base + i for i in range(n)]
        # Run elements through our unique_prime_factors function.
        # Append our target number to the end.
        checker = [upf_len(x) for x in group]
        checker.append(n)
        # If all numbers in the list are equal, return the group variable.
        if equality(checker):
            return group
        # Increment our base variable by 1
        base += 1


def solution(n: int = 4) -> int:
    """Return the first member of the first group of n consecutive integers with n distinct prime factors."""
    results = run(n)
    return results[0] if len(results) else None


if __name__ == "__main__":
    print(solution())
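    # Quick sanity checks, indented to stay inside the __main__ guard above.
    # The expected values follow from the Project Euler 47 statement: 14 and 15
    # are the first consecutive pair with two distinct prime factors each, and
    # 644, 645, 646 form the first such triple.
    assert unique_prime_factors(644) == {2, 7, 23}
    assert solution(2) == 14
    assert solution(3) == 644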
| 53 |
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


# here we list all keys to be renamed (original name on the left, our name on the right)
def create_rename_keys(config, base_model=False):
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("cls_token", "vit.embeddings.cls_token"),
            ("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("norm.weight", "layernorm.weight"),
                ("norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )

    return rename_keys


# we split up the matrix of each encoder layer into queries, keys and values
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_vit_checkpoint(model_name, pytorch_dump_folder_path, base_model=True):
    """
    Copy/paste/tweak the original DINO weights into our ViT structure.
    """
    # define default ViT configuration
    config = ViTConfig()
    # patch_size
    if model_name[-1] == "8":
        config.patch_size = 8
    # set labels if required
    if not base_model:
        config.num_labels = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # size of the architecture
    if model_name in ["dino_vits8", "dino_vits16"]:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_hidden_layers = 12
        config.num_attention_heads = 6

    # load original model from torch hub
    original_model = torch.hub.load("facebookresearch/dino:main", model_name)
    original_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = original_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model=base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    # load HuggingFace model
    if base_model:
        model = ViTModel(config, add_pooling_layer=False).eval()
    else:
        model = ViTForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by ViTImageProcessor
    image_processor = ViTImageProcessor()
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values)

    if base_model:
        final_hidden_state_cls_token = original_model(pixel_values)
        assert torch.allclose(final_hidden_state_cls_token, outputs.last_hidden_state[:, 0, :], atol=1e-1)
    else:
        logits = original_model(pixel_values)
        assert logits.shape == outputs.logits.shape
        assert torch.allclose(logits, outputs.logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="dino_vitb16",
        type=str,
        help="Name of the model trained with DINO you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--base_model",
        action="store_true",
        help="Whether to only convert the base model (no projection head weights).",
    )
    parser.set_defaults(base_model=True)

    args = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
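# Example invocation, as a sketch. "dino_vitb16" is one of the checkpoint names
# published by facebookresearch/dino; the script filename is assumed from the
# transformers repository layout (src/transformers/models/vit/convert_dino_to_pytorch.py):
#
#   python convert_dino_to_pytorch.py --model_name dino_vitb16 --pytorch_dump_folder_path ./dino_vitb16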
| 53 | 1 |
import unittest

from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
        SqueezeBertForMaskedLM,
        SqueezeBertForMultipleChoice,
        SqueezeBertForQuestionAnswering,
        SqueezeBertForSequenceClassification,
        SqueezeBertForTokenClassification,
        SqueezeBertModel,
    )


class SqueezeBertModelTester(object):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=64,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        q_groups=2,
        k_groups=2,
        v_groups=2,
        post_attention_groups=2,
        intermediate_groups=4,
        output_groups=1,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.q_groups = q_groups
        self.k_groups = k_groups
        self.v_groups = v_groups
        self.post_attention_groups = post_attention_groups
        self.intermediate_groups = intermediate_groups
        self.output_groups = output_groups

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return SqueezeBertConfig(
            embedding_size=self.hidden_size,
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            attention_probs_dropout_prob=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            q_groups=self.q_groups,
            k_groups=self.k_groups,
            v_groups=self.v_groups,
            post_attention_groups=self.post_attention_groups,
            intermediate_groups=self.intermediate_groups,
            output_groups=self.output_groups,
        )

    def create_and_check_squeezebert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_squeezebert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_squeezebert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = SqueezeBertForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_squeezebert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_squeezebert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = SqueezeBertForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_squeezebert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = SqueezeBertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class SqueezeBertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            SqueezeBertModel,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
        )
        if is_torch_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SqueezeBertModel,
            "fill-mask": SqueezeBertForMaskedLM,
            "question-answering": SqueezeBertForQuestionAnswering,
            "text-classification": SqueezeBertForSequenceClassification,
            "token-classification": SqueezeBertForTokenClassification,
            "zero-shot": SqueezeBertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = True
    test_head_masking = False

    def setUp(self):
        self.model_tester = SqueezeBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SqueezeBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_squeezebert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_squeezebert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SqueezeBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_sentencepiece
@require_tokenizers
@require_torch
class SqueezeBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_classification_head(self):
        model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")

        input_ids = torch.tensor([[1, 29_414, 232, 328, 740, 1_140, 12_695, 69, 13, 1_588, 2]])
        output = model(input_ids)[0]
        expected_shape = torch.Size((1, 3))
        self.assertEqual(output.shape, expected_shape)
        expected_tensor = torch.tensor([[0.6401, -0.0349, -0.6041]])
        self.assertTrue(torch.allclose(output, expected_tensor, atol=1e-4))
| 715 |
import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
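    # A slightly more defensive variant of the same call, as a sketch: the
    # 10-second timeout is an arbitrary choice, and raise_for_status() turns
    # HTTP 4xx/5xx answers into exceptions instead of parsing an error body.
    checked = requests.get(API_ENDPOINT_URL + "/random", timeout=10)
    checked.raise_for_status()
    pprint.pprint(checked.json())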
| 387 | 0 |
def solution(n: int = 2_000_000) -> int:
    """Return the sum of all primes below n, using a sieve of Eratosthenes."""
    # 0 marks "prime so far"; 1 marks "composite" (0 and 1 are not prime)
    primality_list = [0 for i in range(n + 1)]
    primality_list[0] = 1
    primality_list[1] = 1

    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1

    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i

    return sum_of_primes


if __name__ == "__main__":
    print(f"{solution() = }")
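    # Sanity check, indented to stay inside the __main__ guard: the primes
    # below 10 are 2, 3, 5 and 7, which sum to 17. The sieve itself runs in
    # O(n log log n) time and O(n) memory.
    assert solution(10) == 17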
| 64 |
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Count how many times a term occurs in a document."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing the term, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing=False) -> float:
    """Compute log10(n / df), optionally smoothed as 1 + log10(n / (1 + df))."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)
    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    """Combine term frequency and inverse document frequency."""
    return round(tf * idf, 3)
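# Worked example, as a minimal sketch of how the four functions compose. The
# corpus and the expected values are illustrative and computed by hand: "to"
# occurs twice in the document, appears in 1 of the 3 corpus lines, and
# log10(3 / 1) rounds to 0.477.
if __name__ == "__main__":
    document = "To be, or not to be"
    corpus = "this is line one\nto be or not\nthis is line three"
    tf = term_frequency("to", document)  # -> 2
    df, n = document_frequency("to", corpus)  # -> (1, 3)
    print(tf_idf(tf, inverse_document_frequency(df, n)))  # -> 0.954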
| 385 | 0 |
from typing import List, Optional, Union

from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class BlipProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BlipImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images: ImageInput = None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
            return text_encoding

        # add pixel_values
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)

        if text is not None:
            text_encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_token_type_ids=return_token_type_ids,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
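# Hedged usage sketch: a BlipProcessor is normally loaded from a pretrained
# checkpoint rather than constructed directly. The checkpoint name below is one
# public BLIP model and is only illustrative.
#
#   from PIL import Image
#   from transformers import BlipProcessor
#
#   processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
#   inputs = processor(images=Image.open("cat.png"), text="a photo of", return_tensors="pt")
#   print(inputs.keys())  # pixel_values, input_ids, attention_mask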
| 709 |
import inspect
import unittest

from transformers import MobileNetV2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model
    from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV2ImageProcessor


class MobileNetV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV2Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(
            result.pooler_output.shape,
            (self.batch_size, self.last_hidden_size),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileNetV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetV2Model, MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetV2Model,
            "image-classification": MobileNetV2ForImageClassification,
            "image-segmentation": MobileNetV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV2ModelTester(self)
        self.config_tester = MobileNetV2ConfigTester(self, config_class=MobileNetV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetV2ImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
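# These tests are normally run with pytest from a transformers checkout; the
# path below is assumed from the repository layout, and RUN_SLOW=1 enables the
# @slow integration tests:
#
#   RUN_SLOW=1 pytest tests/models/mobilenet_v2/test_modeling_mobilenet_v2.py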
| 262 | 0 |
import inspect
import unittest

from transformers import RegNetConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch
    from torch import nn

    from transformers import RegNetForImageClassification, RegNetModel
    from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class RegNetModelTester:
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = RegNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = RegNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class RegNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (RegNetModel, RegNetForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": RegNetModel, "image-classification": RegNetForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = RegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config=config)
            for name, module in model.named_modules():
                if isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
                    self.assertTrue(
                        torch.all(module.weight == 1),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
                    self.assertTrue(
                        torch.all(module.bias == 0),
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # RegNet's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 2, self.model_tester.image_size // 2],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        layers_type = ["basic", "bottleneck"]
        for model_class in self.all_model_classes:
            for layer_type in layers_type:
                config.layer_type = layer_type
                inputs_dict["output_hidden_states"] = True
                check_hidden_states_output(inputs_dict, config, model_class)

                # check that output_hidden_states also work using config
                del inputs_dict["output_hidden_states"]
                config.output_hidden_states = True

                check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = RegNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class RegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.4180, -1.5051, -3.4836]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 225 |
import random


def rabin_miller(num: int) -> bool:
    """Probabilistic Miller-Rabin primality test with 5 random bases."""
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True


def is_prime_low_num(num: int) -> bool:
    """Fast pre-check against small primes, falling back to rabin_miller."""
    if num < 2:
        return False

    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29,
        31, 37, 41, 43, 47, 53, 59, 61, 67, 71,
        73, 79, 83, 89, 97, 101, 103, 107, 109, 113,
        127, 131, 137, 139, 149, 151, 157, 163, 167, 173,
        179, 181, 191, 193, 197, 199, 211, 223, 227, 229,
        233, 239, 241, 251, 257, 263, 269, 271, 277, 281,
        283, 293, 307, 311, 313, 317, 331, 337, 347, 349,
        353, 359, 367, 373, 379, 383, 389, 397, 401, 409,
        419, 421, 431, 433, 439, 443, 449, 457, 461, 463,
        467, 479, 487, 491, 499, 503, 509, 521, 523, 541,
        547, 557, 563, 569, 571, 577, 587, 593, 599, 601,
        607, 613, 617, 619, 631, 641, 643, 647, 653, 659,
        661, 673, 677, 683, 691, 701, 709, 719, 727, 733,
        739, 743, 751, 757, 761, 769, 773, 787, 797, 809,
        811, 821, 823, 827, 829, 839, 853, 857, 859, 863,
        877, 881, 883, 887, 907, 911, 919, 929, 937, 941,
        947, 953, 967, 971, 977, 983, 991, 997,
    ]

    if num in low_primes:
        return True

    for prime in low_primes:
        if (num % prime) == 0:
            return False

    return rabin_miller(num)


def generate_large_prime(keysize: int = 1024) -> int:
    """Generate a random prime with roughly keysize bits."""
    while True:
        num = random.randrange(2 ** (keysize - 1), 2 ** (keysize))
        if is_prime_low_num(num):
            return num


if __name__ == "__main__":
    num = generate_large_prime()
    print(("Prime number:", num))
    print(("is_prime_low_num:", is_prime_low_num(num)))
| 225 | 1 |
import copy
import os
from typing import Union

from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "Salesforce/instruct-blip-flan-t5": "https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json",
}


class InstructBlipVisionConfig(PretrainedConfig):
    model_type = "instructblip_vision_model"

    def __init__(
        self,
        hidden_size=1408,
        intermediate_size=6144,
        num_hidden_layers=39,
        num_attention_heads=16,
        image_size=224,
        patch_size=14,
        hidden_act="gelu",
        layer_norm_eps=1e-6,
        attention_dropout=0.0,
        initializer_range=1e-10,
        qkv_bias=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipQFormerConfig(PretrainedConfig):
    model_type = "instructblip_qformer"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        cross_attention_frequency=2,
        encoder_hidden_size=1408,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)


class InstructBlipConfig(PretrainedConfig):
    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: InstructBlipVisionConfig,
        qformer_config: InstructBlipQFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
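# Minimal usage sketch, assuming the classes above are importable from
# transformers as usual. With no arguments the text backbone defaults to OPT,
# and the Q-Former's encoder_hidden_size is tied to the vision hidden size:
#
#   from transformers import InstructBlipConfig
#
#   config = InstructBlipConfig()
#   print(config.qformer_config.encoder_hidden_size)  # 1408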
| 708 |
import unittest

import numpy as np
import timeout_decorator  # noqa

from transformers import BlenderbotSmallConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor


if is_flax_available():
    import os

    # The slow tests are often failing with OOM error on GPU
    # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
    # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform"

    import jax
    import jax.numpy as jnp

    from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import (
        FlaxBlenderbotSmallForConditionalGeneration,
        FlaxBlenderbotSmallModel,
        shift_tokens_right,
    )


def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": attention_mask,
    }


class FlaxBlenderbotSmallModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range

    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)

        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")


@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99

    def _get_config_and_data(self):
        input_ids = np.array(
            [
                [71, 82, 18, 33, 46, 91, 2],
                [68, 34, 26, 58, 30, 82, 2],
                [5, 97, 17, 39, 94, 40, 2],
                [76, 83, 94, 25, 70, 78, 2],
                [87, 59, 41, 35, 48, 66, 2],
                [55, 13, 16, 58, 5, 2, 1],  # note padding
                [64, 27, 31, 51, 12, 75, 2],
                [52, 64, 86, 17, 83, 39, 2],
                [48, 61, 9, 24, 71, 82, 2],
                [26, 1, 60, 48, 22, 13, 2],
                [21, 5, 62, 28, 14, 76, 2],
                [45, 98, 37, 86, 59, 48, 2],
                [70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size

    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_lm_uneven_forward(self):
        config = BlenderbotSmallConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotSmallForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)

    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())


@require_flax
class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotSmallModel,
            FlaxBlenderbotSmallForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxBlenderbotSmallModelTester(self)
def UpperCAmelCase__ (self ):
lowerCamelCase_, lowerCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(A , A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_, lowerCamelCase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(A , A , A )
def UpperCAmelCase__ (self ):
lowerCamelCase_, lowerCamelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase_ : Optional[int] = self._prepare_for_class(A , A )
lowerCamelCase_ : Any = model_class(A )
@jax.jit
def encode_jitted(A , A=None , **A ):
return model.encode(input_ids=A , attention_mask=A )
with self.subTest('''JIT Enabled''' ):
lowerCamelCase_ : int = encode_jitted(**A ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCamelCase_ : Union[str, Any] = encode_jitted(**A ).to_tuple()
self.assertEqual(len(A ) , len(A ) )
for jitted_output, output in zip(A , A ):
self.assertEqual(jitted_output.shape , output.shape )
def UpperCAmelCase__ (self ):
lowerCamelCase_, lowerCamelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCamelCase_ : Any = model_class(A )
lowerCamelCase_ : Union[str, Any] = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
lowerCamelCase_ : Union[str, Any] = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(A , A , A ):
return model.decode(
decoder_input_ids=A , decoder_attention_mask=A , encoder_outputs=A , )
with self.subTest('''JIT Enabled''' ):
lowerCamelCase_ : Tuple = decode_jitted(**A ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
lowerCamelCase_ : Optional[Any] = decode_jitted(**A ).to_tuple()
self.assertEqual(len(A ) , len(A ) )
for jitted_output, output in zip(A , A ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def UpperCAmelCase__ (self ):
for model_class_name in self.all_model_classes:
lowerCamelCase_ : Union[str, Any] = model_class_name.from_pretrained('''facebook/blenderbot_small-90M''' )
# FlaxBlenderbotForSequenceClassification expects eos token in input_ids
lowerCamelCase_ : Optional[Any] = np.ones((1, 1) ) * model.config.eos_token_id
lowerCamelCase_ : Optional[int] = model(A )
self.assertIsNotNone(A )
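# Editor's note: the two cache checks above follow the usual Flax seq2seq
# pattern: init_cache pre-allocates past_key_values for max_decoder_length,
# the prompt (all but the last token) is decoded with explicit
# decoder_position_ids, the final token is then fed alone, and the cached
# logits must match a plain full decode to within 1e-3.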
| 357 | 0 |
from __future__ import annotations
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : int =0.0_0
__magic_name__ : Tuple =0
for resistor in resistors:
if resistor <= 0:
__magic_name__ : Optional[int] =F"Resistor at index {index} has a negative or zero value!"
raise ValueError(lowerCamelCase )
first_sum += 1 / float(lowerCamelCase )
index += 1
return 1 / first_sum
def lowerCAmelCase_ ( lowerCamelCase ):
__magic_name__ : Optional[int] =0.0_0
__magic_name__ : Optional[Any] =0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
__magic_name__ : Optional[int] =F"Resistor at index {index} has a negative value!"
raise ValueError(lowerCamelCase )
index += 1
return sum_r
if __name__ == "__main__":
import doctest
doctest.testmod()
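# Editor's note: a doctest-style sketch of the two helpers above; the masked
# function names are assumed to be resistor_parallel and resistor_series from
# the original module.
#
# >>> resistor_parallel([2.0, 4.0, 4.0])  # 1 / (1/2 + 1/4 + 1/4)
# 1.0
# >>> resistor_series([2.0, 4.0, 4.0])  # 2 + 4 + 4
# 10.0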
| 21 |
'''simple docstring'''
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def _lowerCAmelCase (_lowercase ):
"""simple docstring"""
    return _lowercase + 2
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase_ ( self : Union[str, Any] ):
a__ = "x = 3"
a__ = {}
a__ = evaluate(a__ ,{} ,state=a__ )
assert result == 3
self.assertDictEqual(a__ ,{"x": 3} )
a__ = "x = y"
a__ = {"y": 5}
a__ = evaluate(a__ ,{} ,state=a__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(a__ ,{"x": 5, "y": 5} )
def lowerCAmelCase_ ( self : str ):
a__ = "y = add_two(x)"
a__ = {"x": 3}
a__ = evaluate(a__ ,{"add_two": add_two} ,state=a__ )
assert result == 5
self.assertDictEqual(a__ ,{"x": 3, "y": 5} )
# Won't work without the tool
with CaptureStdout() as out:
a__ = evaluate(a__ ,{} ,state=a__ )
assert result is None
assert "tried to execute add_two" in out.out
def lowerCAmelCase_ ( self : Any ):
a__ = "x = 3"
a__ = {}
a__ = evaluate(a__ ,{} ,state=a__ )
assert result == 3
self.assertDictEqual(a__ ,{"x": 3} )
def lowerCAmelCase_ ( self : Dict ):
a__ = "test_dict = {'x': x, 'y': add_two(x)}"
a__ = {"x": 3}
a__ = evaluate(a__ ,{"add_two": add_two} ,state=a__ )
self.assertDictEqual(a__ ,{"x": 3, "y": 5} )
self.assertDictEqual(a__ ,{"x": 3, "test_dict": {"x": 3, "y": 5}} )
def lowerCAmelCase_ ( self : Dict ):
a__ = "x = 3\ny = 5"
a__ = {}
a__ = evaluate(a__ ,{} ,state=a__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(a__ ,{"x": 3, "y": 5} )
def lowerCAmelCase_ ( self : str ):
a__ = "text = f'This is x: {x}.'"
a__ = {"x": 3}
a__ = evaluate(a__ ,{} ,state=a__ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(a__ ,{"x": 3, "text": "This is x: 3."} )
def lowerCAmelCase_ ( self : Union[str, Any] ):
a__ = "if x <= 3:\n y = 2\nelse:\n y = 5"
a__ = {"x": 3}
a__ = evaluate(a__ ,{} ,state=a__ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(a__ ,{"x": 3, "y": 2} )
a__ = {"x": 8}
a__ = evaluate(a__ ,{} ,state=a__ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(a__ ,{"x": 8, "y": 5} )
def lowerCAmelCase_ ( self : List[Any] ):
a__ = "test_list = [x, add_two(x)]"
a__ = {"x": 3}
a__ = evaluate(a__ ,{"add_two": add_two} ,state=a__ )
self.assertListEqual(a__ ,[3, 5] )
self.assertDictEqual(a__ ,{"x": 3, "test_list": [3, 5]} )
def lowerCAmelCase_ ( self : Any ):
a__ = "y = x"
a__ = {"x": 3}
a__ = evaluate(a__ ,{} ,state=a__ )
assert result == 3
self.assertDictEqual(a__ ,{"x": 3, "y": 3} )
def lowerCAmelCase_ ( self : Tuple ):
a__ = "test_list = [x, add_two(x)]\ntest_list[1]"
a__ = {"x": 3}
a__ = evaluate(a__ ,{"add_two": add_two} ,state=a__ )
assert result == 5
self.assertDictEqual(a__ ,{"x": 3, "test_list": [3, 5]} )
a__ = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
a__ = {"x": 3}
a__ = evaluate(a__ ,{"add_two": add_two} ,state=a__ )
assert result == 5
self.assertDictEqual(a__ ,{"x": 3, "test_dict": {"x": 3, "y": 5}} )
def lowerCAmelCase_ ( self : List[Any] ):
a__ = "x = 0\nfor i in range(3):\n x = i"
a__ = {}
a__ = evaluate(a__ ,{"range": range} ,state=a__ )
assert result == 2
self.assertDictEqual(a__ ,{"x": 2, "i": 2} )
| 331 | 0 |
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowercase__ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase) -> Tuple:
"""simple docstring"""
if openai_config_file == "":
UpperCamelCase = OpenAIGPTConfig()
else:
UpperCamelCase = OpenAIGPTConfig.from_json_file(_UpperCamelCase)
UpperCamelCase = OpenAIGPTModel(_UpperCamelCase)
# Load weights from numpy
load_tf_weights_in_openai_gpt(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase)
# Save pytorch-model
UpperCamelCase = pytorch_dump_folder_path + '/' + WEIGHTS_NAME
UpperCamelCase = pytorch_dump_folder_path + '/' + CONFIG_NAME
print(F'Save PyTorch model to {pytorch_weights_dump_path}')
torch.save(model.state_dict() , _UpperCamelCase)
print(F'Save configuration file to {pytorch_config_dump_path}')
with open(_UpperCamelCase , 'w' , encoding='utf-8') as f:
f.write(config.to_json_string())
if __name__ == "__main__":
__magic_name__ : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--openai_checkpoint_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the TensorFlow checkpoint path.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--openai_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
__magic_name__ : List[str] = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
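# Editor's note: a hypothetical invocation of this conversion script; the
# script name and both paths are illustrative placeholders, not real files:
#
# python convert_openai_gpt_checkpoint.py \
#     --openai_checkpoint_folder_path ./tf-checkpoint \
#     --pytorch_dump_folder_path ./pytorch-model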
| 410 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_realm import RealmTokenizer
__magic_name__ : Optional[Any] = logging.get_logger(__name__)
__magic_name__ : Optional[int] = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
__magic_name__ : str = {
'''vocab_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/vocab.txt'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/vocab.txt'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/vocab.txt''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/vocab.txt''',
},
'''tokenizer_file''': {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/tokenizer.jsont'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/tokenizer.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-openqa/aresolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-openqa''': (
'''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-nq-reader''': (
'''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-openqa''': (
'''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/tokenizer.json'''
),
'''google/realm-orqa-wq-reader''': (
'''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/tokenizer.json'''
),
},
}
__magic_name__ : Any = {
'''google/realm-cc-news-pretrained-embedder''': 512,
'''google/realm-cc-news-pretrained-encoder''': 512,
'''google/realm-cc-news-pretrained-scorer''': 512,
'''google/realm-cc-news-pretrained-openqa''': 512,
'''google/realm-orqa-nq-openqa''': 512,
'''google/realm-orqa-nq-reader''': 512,
'''google/realm-orqa-wq-openqa''': 512,
'''google/realm-orqa-wq-reader''': 512,
}
__magic_name__ : Any = {
'''google/realm-cc-news-pretrained-embedder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-encoder''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-scorer''': {'''do_lower_case''': True},
'''google/realm-cc-news-pretrained-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-nq-reader''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-openqa''': {'''do_lower_case''': True},
'''google/realm-orqa-wq-reader''': {'''do_lower_case''': True},
}
class A__ ( __snake_case ):
'''simple docstring'''
snake_case__ = VOCAB_FILES_NAMES
snake_case__ = PRETRAINED_VOCAB_FILES_MAP
snake_case__ = PRETRAINED_INIT_CONFIGURATION
snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case__ = RealmTokenizer
def __init__( self : str , _SCREAMING_SNAKE_CASE : Union[str, Any]=None , _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Optional[Any]=True , _SCREAMING_SNAKE_CASE : Tuple="[UNK]" , _SCREAMING_SNAKE_CASE : Optional[int]="[SEP]" , _SCREAMING_SNAKE_CASE : Dict="[PAD]" , _SCREAMING_SNAKE_CASE : Any="[CLS]" , _SCREAMING_SNAKE_CASE : int="[MASK]" , _SCREAMING_SNAKE_CASE : int=True , _SCREAMING_SNAKE_CASE : List[str]=None , **_SCREAMING_SNAKE_CASE : int , ):
"""simple docstring"""
super().__init__(
_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , tokenize_chinese_chars=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
UpperCamelCase = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , _SCREAMING_SNAKE_CASE ) != do_lower_case
or normalizer_state.get('strip_accents' , _SCREAMING_SNAKE_CASE ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , _SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars
):
UpperCamelCase = getattr(_SCREAMING_SNAKE_CASE , normalizer_state.pop('type' ) )
UpperCamelCase = do_lower_case
UpperCamelCase = strip_accents
UpperCamelCase = tokenize_chinese_chars
UpperCamelCase = normalizer_class(**_SCREAMING_SNAKE_CASE )
UpperCamelCase = do_lower_case
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , _SCREAMING_SNAKE_CASE : str , **_SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
UpperCamelCase = PaddingStrategy.MAX_LENGTH
UpperCamelCase = text
UpperCamelCase = kwargs.pop('text_pair' , _SCREAMING_SNAKE_CASE )
UpperCamelCase = kwargs.pop('return_tensors' , _SCREAMING_SNAKE_CASE )
UpperCamelCase = {
'input_ids': [],
'attention_mask': [],
'token_type_ids': [],
}
for idx, candidate_text in enumerate(_SCREAMING_SNAKE_CASE ):
if batch_text_pair is not None:
UpperCamelCase = batch_text_pair[idx]
else:
UpperCamelCase = None
UpperCamelCase = super().__call__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
UpperCamelCase = encoded_candidates.get('input_ids' )
UpperCamelCase = encoded_candidates.get('attention_mask' )
UpperCamelCase = encoded_candidates.get('token_type_ids' )
if encoded_input_ids is not None:
output_data["input_ids"].append(_SCREAMING_SNAKE_CASE )
if encoded_attention_mask is not None:
output_data["attention_mask"].append(_SCREAMING_SNAKE_CASE )
if encoded_token_type_ids is not None:
output_data["token_type_ids"].append(_SCREAMING_SNAKE_CASE )
UpperCamelCase = {key: item for key, item in output_data.items() if len(_SCREAMING_SNAKE_CASE ) != 0}
return BatchEncoding(_SCREAMING_SNAKE_CASE , tensor_type=_SCREAMING_SNAKE_CASE )
def _SCREAMING_SNAKE_CASE ( self : Any , _SCREAMING_SNAKE_CASE : Dict , _SCREAMING_SNAKE_CASE : Union[str, Any]=None ):
"""simple docstring"""
UpperCamelCase = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _SCREAMING_SNAKE_CASE ( self : int , _SCREAMING_SNAKE_CASE : List[int] , _SCREAMING_SNAKE_CASE : Optional[List[int]] = None ):
"""simple docstring"""
UpperCamelCase = [self.sep_token_id]
UpperCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _SCREAMING_SNAKE_CASE ( self : int , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[str] = None ):
"""simple docstring"""
UpperCamelCase = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
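# Editor's note: a hedged usage sketch. The padded-candidate call above is
# assumed to correspond to batch_encode_candidates from the upstream
# RealmTokenizer: each outer list is one example's candidate set, and every
# returned tensor has shape (batch_size, num_candidates, max_length).
#
# tokenizer = RealmTokenizerFast.from_pretrained("google/realm-cc-news-pretrained-encoder")
# batch = tokenizer.batch_encode_candidates(
#     [["candidate 1", "candidate 2"], ["candidate 3", "candidate 4"]],
#     max_length=10,
#     return_tensors="np",
# )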
| 410 | 1 |
lowercase_ = """Input must be a string of 8 numbers plus letter"""
lowercase_ = """TRWAGMYFPDXBNJZSQVHLCKE"""
def a__ ( snake_case ):
"""simple docstring"""
if not isinstance(snake_case , snake_case ):
__SCREAMING_SNAKE_CASE : List[Any] = F'''Expected string as input, found {type(snake_case ).__name__}'''
raise TypeError(snake_case )
__SCREAMING_SNAKE_CASE : List[str] = spanish_id.replace('''-''' , '''''' ).upper()
if len(snake_case ) != 9:
raise ValueError(snake_case )
try:
__SCREAMING_SNAKE_CASE : Optional[Any] = int(spanish_id_clean[0:8] )
__SCREAMING_SNAKE_CASE : Tuple = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(snake_case ) from ex
if letter.isdigit():
raise ValueError(snake_case )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
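# Editor's note: a worked example of the mod-23 checksum used above, with a
# made-up ID. 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == "Z", so
# "12345678Z" validates (True) while "12345678A" does not (False).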
| 74 |
import html
from ...feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from ...utils import is_bsa_available, logging, requires_backends
if is_bsa_available():
import bsa
from bsa import BeautifulSoup
lowercase_ = logging.get_logger(__name__)
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
def __init__( self : Optional[Any] , **_A : Dict ):
"""simple docstring"""
requires_backends(self , ['''bs4'''] )
super().__init__(**_A )
def UpperCAmelCase__ ( self : Optional[int] , _A : Any ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : int = []
__SCREAMING_SNAKE_CASE : Any = []
__SCREAMING_SNAKE_CASE : Union[str, Any] = element if element.name else element.parent
for parent in child.parents: # type: bs4.element.Tag
__SCREAMING_SNAKE_CASE : Optional[int] = parent.find_all(child.name , recursive=_A )
xpath_tags.append(child.name )
xpath_subscripts.append(
0 if 1 == len(_A ) else next(i for i, s in enumerate(_A , 1 ) if s is child ) )
__SCREAMING_SNAKE_CASE : Any = parent
xpath_tags.reverse()
xpath_subscripts.reverse()
return xpath_tags, xpath_subscripts
def UpperCAmelCase__ ( self : Dict , _A : Optional[int] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[int] = BeautifulSoup(_A , '''html.parser''' )
__SCREAMING_SNAKE_CASE : str = []
__SCREAMING_SNAKE_CASE : Optional[Any] = []
__SCREAMING_SNAKE_CASE : int = []
for element in html_code.descendants:
if type(_A ) == bsa.element.NavigableString:
if type(element.parent ) != bsa.element.Tag:
continue
__SCREAMING_SNAKE_CASE : List[Any] = html.unescape(_A ).strip()
if not text_in_this_tag:
continue
all_doc_strings.append(_A )
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Union[str, Any] = self.xpath_soup(_A )
stringaxtag_seq.append(_A )
stringaxsubs_seq.append(_A )
if len(_A ) != len(_A ):
raise ValueError('''Number of doc strings and xtags does not correspond''' )
if len(_A ) != len(_A ):
raise ValueError('''Number of doc strings and xsubs does not correspond''' )
return all_doc_strings, stringaxtag_seq, stringaxsubs_seq
def UpperCAmelCase__ ( self : int , _A : Tuple , _A : List[str] ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = ''''''
for tagname, subs in zip(_A , _A ):
xpath += F'''/{tagname}'''
if subs != 0:
xpath += F'''[{subs}]'''
return xpath
def __call__( self : Optional[int] , _A : Tuple ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : List[Any] = False
# Check that strings has a valid type
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE : Any = True
elif isinstance(_A , (list, tuple) ):
if len(_A ) == 0 or isinstance(html_strings[0] , _A ):
__SCREAMING_SNAKE_CASE : List[Any] = True
if not valid_strings:
raise ValueError(
'''HTML strings must of type `str`, `List[str]` (batch of examples), '''
F'''but is of type {type(_A )}.''' )
__SCREAMING_SNAKE_CASE : Any = bool(isinstance(_A , (list, tuple) ) and (isinstance(html_strings[0] , _A )) )
if not is_batched:
__SCREAMING_SNAKE_CASE : Dict = [html_strings]
# Get nodes + xpaths
__SCREAMING_SNAKE_CASE : str = []
__SCREAMING_SNAKE_CASE : Tuple = []
for html_string in html_strings:
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Optional[Any] = self.get_three_from_single(_A )
nodes.append(_A )
__SCREAMING_SNAKE_CASE : Dict = []
for node, tag_list, sub_list in zip(_A , _A , _A ):
__SCREAMING_SNAKE_CASE : List[Any] = self.construct_xpath(_A , _A )
xpath_strings.append(_A )
xpaths.append(_A )
# return as Dict
__SCREAMING_SNAKE_CASE : Optional[int] = {'''nodes''': nodes, '''xpaths''': xpaths}
__SCREAMING_SNAKE_CASE : List[str] = BatchFeature(data=_A , tensor_type=_A )
return encoded_inputs
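# Editor's note: a minimal sketch of the extractor above (its names are
# masked; it matches transformers' bs4-backed MarkupLM feature extractor).
# It maps raw HTML to parallel lists of text nodes and their XPaths:
#
# fe = MarkupLMFeatureExtractor()
# out = fe("<html><body><p>Hello</p></body></html>")
# out["nodes"]   # [["Hello"]]
# out["xpaths"]  # [["/html/body/p"]]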
| 74 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowercase :Optional[int] = {
"configuration_rag": ["RagConfig"],
"retrieval_rag": ["RagRetriever"],
"tokenization_rag": ["RagTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :str = [
"RagModel",
"RagPreTrainedModel",
"RagSequenceForGeneration",
"RagTokenForGeneration",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowercase :Optional[int] = [
"TFRagModel",
"TFRagPreTrainedModel",
"TFRagSequenceForGeneration",
"TFRagTokenForGeneration",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
__lowercase :Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 26 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( lowercase__ ):
"""simple docstring"""
snake_case_ = ["image_processor", "tokenizer"]
snake_case_ = "CLIPImageProcessor"
snake_case_ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : Any , a : List[Any]=None , a : Any=None , **a : int ) ->int:
SCREAMING_SNAKE_CASE__ : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , a , )
SCREAMING_SNAKE_CASE__ : List[Any] = kwargs.pop("feature_extractor" )
SCREAMING_SNAKE_CASE__ : int = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(a , a )
def __call__( self : Tuple , a : Tuple=None , a : Union[str, Any]=None , a : List[str]=None , **a : Optional[Any] ) ->Optional[Any]:
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none." )
if text is not None:
SCREAMING_SNAKE_CASE__ : str = self.tokenizer(a , return_tensors=a , **a )
if images is not None:
SCREAMING_SNAKE_CASE__ : int = self.image_processor(a , return_tensors=a , **a )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ : Tuple = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**a ) , tensor_type=a )
def A_ ( self : Optional[int] , *a : Any , **a : List[str] ) ->Any:
return self.tokenizer.batch_decode(*a , **a )
def A_ ( self : Any , *a : Optional[int] , **a : Dict ) ->Any:
return self.tokenizer.decode(*a , **a )
@property
def A_ ( self : List[str] ) ->Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def A_ ( self : Optional[int] ) ->List[Any]:
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , a , )
return self.image_processor_class
@property
def A_ ( self : Dict ) ->str:
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , a , )
        return self.image_processor
| 26 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
lowercase_ = logging.getLogger(__name__)
@dataclass
class __A :
'''simple docstring'''
__lowerCamelCase : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__lowerCamelCase : Optional[str] = field(
default=A , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__lowerCamelCase : Optional[str] = field(
default=A , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__lowerCamelCase : Optional[str] = field(
default=A , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
__lowerCamelCase : bool = field(
default=A , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
__lowerCamelCase : str = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
__lowerCamelCase : bool = field(
default=A , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
@dataclass
class __A :
'''simple docstring'''
__lowerCamelCase : Optional[str] = field(default=A , metadata={'help': 'The input training data file (a text file).'} )
__lowerCamelCase : Optional[str] = field(
default=A , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
__lowerCamelCase : bool = field(
default=A , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
__lowerCamelCase : Optional[int] = field(
default=A , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
__lowerCamelCase : Optional[int] = field(
default=A , metadata={
'help': (
'The maximum total input sequence length after tokenization. If passed, sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__lowerCamelCase : bool = field(
default=A , metadata={
'help': (
'Whether to pad all samples to the maximum sentence length. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
'efficient on GPU but very bad for TPU.'
)
} , )
__lowerCamelCase : Optional[int] = field(
default=A , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
} , )
__lowerCamelCase : Optional[int] = field(
default=A , metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
} , )
def a__ (self ) -> str:
"""simple docstring"""
if self.train_file is not None:
_a = self.train_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
_a = self.validation_file.split('''.''' )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class __A :
'''simple docstring'''
__lowerCamelCase : PreTrainedTokenizerBase
__lowerCamelCase : Union[bool, str, PaddingStrategy] = True
__lowerCamelCase : Optional[int] = None
__lowerCamelCase : Optional[int] = None
def __call__(self , A ) -> int:
"""simple docstring"""
_a = '''label''' if '''label''' in features[0].keys() else '''labels'''
_a = [feature.pop(A ) for feature in features]
_a = len(A )
_a = len(features[0]['''input_ids'''] )
_a = [
[{k: v[i] for k, v in feature.items()} for i in range(A )] for feature in features
]
_a = list(chain(*A ) )
_a = self.tokenizer.pad(
A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , )
# Un-flatten
_a = {k: v.view(A , A , -1 ) for k, v in batch.items()}
# Add back labels
_a = torch.tensor(A , dtype=torch.intaa )
return batch
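# Editor's note: the collator above flattens (batch, num_choices) features
# into batch * num_choices plain rows so tokenizer.pad sees ordinary
# sequences, then views the padded tensors back to (batch, num_choices, -1)
# and re-attaches the popped labels as an int64 tensor.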
def lowerCAmelCase ():
"""simple docstring"""
_a = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith('''.json'''):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_a , _a , _a = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
_a , _a , _a = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('''run_swag''' , __A , __A)
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout)] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
_a = training_args.get_process_log_level()
logger.setLevel(__A)
datasets.utils.logging.set_verbosity(__A)
transformers.utils.logging.set_verbosity(__A)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fpaa}''')
logger.info(F'''Training/evaluation parameters {training_args}''')
# Detecting last checkpoint.
_a = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
_a = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
'''Use --overwrite_output_dir to overcome.''')
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
'''the `--output_dir` or add `--overwrite_output_dir` to train from scratch.''')
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
_a = {}
if data_args.train_file is not None:
_a = data_args.train_file
if data_args.validation_file is not None:
_a = data_args.validation_file
_a = data_args.train_file.split('''.''')[-1]
_a = load_dataset(
__A , data_files=__A , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
_a = load_dataset(
'''swag''' , '''regular''' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_a = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_a = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
_a = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path) , config=__A , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
_a = [F'''ending{i}''' for i in range(4)]
_a = '''sent1'''
_a = '''sent2'''
if data_args.max_seq_length is None:
_a = tokenizer.model_max_length
if max_seq_length > 1_024:
logger.warning(
'''The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value'''
''' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can'''
''' override this default with `--block_size xxx`.''')
_a = 1_024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''')
_a = min(data_args.max_seq_length , tokenizer.model_max_length)
# Preprocessing the datasets.
def preprocess_function(__A):
_a = [[context] * 4 for context in examples[context_name]]
_a = examples[question_header_name]
_a = [
[F'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(__A)
]
# Flatten out
_a = list(chain(*__A))
_a = list(chain(*__A))
# Tokenize
_a = tokenizer(
__A , __A , truncation=__A , max_length=__A , padding='''max_length''' if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(__A) , 4)] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError('''--do_train requires a train dataset''')
_a = raw_datasets['''train''']
if data_args.max_train_samples is not None:
_a = min(len(__A) , data_args.max_train_samples)
_a = train_dataset.select(range(__A))
with training_args.main_process_first(desc='''train dataset map pre-processing'''):
_a = train_dataset.map(
__A , batched=__A , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError('''--do_eval requires a validation dataset''')
_a = raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
_a = min(len(__A) , data_args.max_eval_samples)
_a = eval_dataset.select(range(__A))
with training_args.main_process_first(desc='''validation dataset map pre-processing'''):
_a = eval_dataset.map(
__A , batched=__A , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
_a = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=__A , pad_to_multiple_of=8 if training_args.fpaa else None)
)
# Metric
def compute_metrics(__A):
_a , _a = eval_predictions
_a = np.argmax(__A , axis=1)
return {"accuracy": (preds == label_ids).astype(np.floataa).mean().item()}
# Initialize our Trainer
_a = Trainer(
model=__A , args=__A , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__A , data_collator=__A , compute_metrics=__A , )
# Training
if training_args.do_train:
_a = None
if training_args.resume_from_checkpoint is not None:
_a = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
_a = last_checkpoint
_a = trainer.train(resume_from_checkpoint=__A)
trainer.save_model() # Saves the tokenizer too for easy upload
_a = train_result.metrics
_a = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__A)
)
_a = min(__A , len(__A))
trainer.log_metrics('''train''' , __A)
trainer.save_metrics('''train''' , __A)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info('''*** Evaluate ***''')
_a = trainer.evaluate()
_a = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__A)
_a = min(__A , len(__A))
trainer.log_metrics('''eval''' , __A)
trainer.save_metrics('''eval''' , __A)
_a = {
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**__A)
else:
trainer.create_model_card(**__A)
def lowerCAmelCase (__A):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
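# Editor's note: a hypothetical launch command for this SWAG fine-tuning
# script; the model name and hyperparameters are illustrative only:
#
# python run_swag.py --model_name_or_path bert-base-uncased \
#     --do_train --do_eval --output_dir ./swag-out \
#     --per_device_train_batch_size 16 --learning_rate 5e-5 --num_train_epochs 3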
| 11 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
lowerCAmelCase :Dict = datasets.utils.logging.get_logger(__name__)
class _lowerCamelCase ( folder_based_builder.FolderBasedBuilderConfig ):
'''simple docstring'''
A_ : bool = None
A_ : bool = None
class _lowerCamelCase ( folder_based_builder.FolderBasedBuilder ):
'''simple docstring'''
A_ : Union[str, Any] = datasets.Audio()
A_ : Tuple = """audio"""
A_ : Optional[Any] = AudioFolderConfig
A_ : List[str] # definition at the bottom of the script
A_ : Any = AudioClassification(audio_column="""audio""" , label_column="""label""" )
lowerCAmelCase :List[str] = [
'''.aiff''',
'''.au''',
'''.avr''',
'''.caf''',
'''.flac''',
'''.htk''',
'''.svx''',
'''.mat4''',
'''.mat5''',
'''.mpc2k''',
'''.ogg''',
'''.paf''',
'''.pvf''',
'''.raw''',
'''.rf64''',
'''.sd2''',
'''.sds''',
'''.ircam''',
'''.voc''',
'''.w64''',
'''.wav''',
'''.nist''',
'''.wavex''',
'''.wve''',
'''.xi''',
'''.mp3''',
'''.opus''',
]
lowerCAmelCase :str = AUDIO_EXTENSIONS
| 561 | 0 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING, is_vision_available, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class _UpperCAmelCase :
@staticmethod
def lowerCamelCase__ ( *snake_case_ , **snake_case_ ):
pass
@is_pipeline_test
@require_vision
@require_torch
class _UpperCAmelCase ( unittest.TestCase):
__lowercase : str = MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
def lowerCamelCase__ ( self , snake_case_ , snake_case_ , snake_case_ ):
_snake_case : str = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
_snake_case : int = [
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
]
return object_detector, examples
def lowerCamelCase__ ( self , snake_case_ , snake_case_ ):
_snake_case : Optional[int] = object_detector(examples[0] , threshold=0.0 )
_snake_case : Tuple = len(lowerCAmelCase_ )
self.assertGreater(lowerCAmelCase_ , 0 )
self.assertEqual(
lowerCAmelCase_ , [
{
"score": ANY(lowerCAmelCase_ ),
"label": ANY(lowerCAmelCase_ ),
"box": {"xmin": ANY(lowerCAmelCase_ ), "ymin": ANY(lowerCAmelCase_ ), "xmax": ANY(lowerCAmelCase_ ), "ymax": ANY(lowerCAmelCase_ )},
}
for i in range(lowerCAmelCase_ )
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def lowerCamelCase__ ( self ):
pass
@require_torch
def lowerCamelCase__ ( self ):
_snake_case : Tuple = pipeline(
"zero-shot-object-detection" , model="hf-internal-testing/tiny-random-owlvit-object-detection" )
_snake_case : Any = object_detector(
"./tests/fixtures/tests_samples/COCO/000000039769.png" , candidate_labels=["cat", "remote", "couch"] , threshold=0.64 , )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_94, "ymin": 1_05, "xmax": 5_21, "ymax": 1_27}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 2_74, "xmax": 93, "ymax": 2_97}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_94, "ymin": 1_05, "xmax": 5_21, "ymax": 1_27}},
] , )
_snake_case : List[Any] = object_detector(
[
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"candidate_labels": ["cat", "remote", "couch"],
}
] , threshold=0.64 , )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
[
{"score": 0.7235, "label": "cat", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}},
{"score": 0.7218, "label": "remote", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}},
{"score": 0.7184, "label": "couch", "box": {"xmin": 2_04, "ymin": 1_67, "xmax": 2_32, "ymax": 1_90}},
{"score": 0.6748, "label": "remote", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}},
{"score": 0.6656, "label": "cat", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}},
{"score": 0.6614, "label": "couch", "box": {"xmin": 5_71, "ymin": 83, "xmax": 5_98, "ymax": 1_03}},
{"score": 0.6456, "label": "remote", "box": {"xmin": 4_94, "ymin": 1_05, "xmax": 5_21, "ymax": 1_27}},
{"score": 0.642, "label": "remote", "box": {"xmin": 67, "ymin": 2_74, "xmax": 93, "ymax": 2_97}},
{"score": 0.6419, "label": "cat", "box": {"xmin": 4_94, "ymin": 1_05, "xmax": 5_21, "ymax": 1_27}},
]
] , )
@require_torch
@slow
def lowerCamelCase__ ( self ):
_snake_case : Any = pipeline("zero-shot-object-detection" )
_snake_case : Tuple = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_24, "ymin": 20, "xmax": 6_40, "ymax": 3_73}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 1_77, "ymax": 1_15}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 3_15, "ymax": 4_72}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_35, "ymin": 74, "xmax": 3_71, "ymax": 1_87}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_42, "ymax": 4_76}},
] , )
_snake_case : int = object_detector(
[
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
{
"image": "http://images.cocodataset.org/val2017/000000039769.jpg",
"candidate_labels": ["cat", "remote", "couch"],
},
] , )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_24, "ymin": 20, "xmax": 6_40, "ymax": 3_73}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 1_77, "ymax": 1_15}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 3_15, "ymax": 4_72}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_35, "ymin": 74, "xmax": 3_71, "ymax": 1_87}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_42, "ymax": 4_76}},
],
[
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_24, "ymin": 20, "xmax": 6_40, "ymax": 3_73}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 1_77, "ymax": 1_15}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 3_15, "ymax": 4_72}},
{"score": 0.1474, "label": "remote", "box": {"xmin": 3_35, "ymin": 74, "xmax": 3_71, "ymax": 1_87}},
{"score": 0.1208, "label": "couch", "box": {"xmin": 4, "ymin": 0, "xmax": 6_42, "ymax": 4_76}},
],
] , )
@require_tf
@unittest.skip("Zero Shot Object Detection not implemented in TF" )
def lowerCamelCase__ ( self ):
pass
@require_torch
@slow
def lowerCamelCase__ ( self ):
_snake_case : Dict = 0.2
_snake_case : List[Any] = pipeline("zero-shot-object-detection" )
_snake_case : Optional[int] = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , threshold=lowerCAmelCase_ , )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_24, "ymin": 20, "xmax": 6_40, "ymax": 3_73}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 1_77, "ymax": 1_15}},
{"score": 0.2537, "label": "cat", "box": {"xmin": 1, "ymin": 55, "xmax": 3_15, "ymax": 4_72}},
] , )
@require_torch
@slow
def lowerCamelCase__ ( self ):
_snake_case : List[str] = 2
_snake_case : Union[str, Any] = pipeline("zero-shot-object-detection" )
_snake_case : List[Any] = object_detector(
"http://images.cocodataset.org/val2017/000000039769.jpg" , candidate_labels=["cat", "remote", "couch"] , top_k=lowerCAmelCase_ , )
self.assertEqual(
nested_simplify(lowerCAmelCase_ , decimals=4 ) , [
{"score": 0.2868, "label": "cat", "box": {"xmin": 3_24, "ymin": 20, "xmax": 6_40, "ymax": 3_73}},
{"score": 0.277, "label": "remote", "box": {"xmin": 40, "ymin": 72, "xmax": 1_77, "ymax": 1_15}},
] , )
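# Editor's note: the API exercised by the tests above, in plain form.
# candidate_labels drives the zero-shot matching, while threshold and top_k
# filter the returned boxes:
#
# detector = pipeline("zero-shot-object-detection")
# detector(
#     "http://images.cocodataset.org/val2017/000000039769.jpg",
#     candidate_labels=["cat", "remote", "couch"],
#     threshold=0.2,
# )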
| 711 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 87 | 0 |
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 84 |
def __lowerCamelCase ( _lowercase ) -> str:
return "".join(chr(ord(_lowercase ) - 32 ) if 'a' <= char <= 'z' else char for char in word )
if __name__ == "__main__":
from doctest import testmod
testmod()
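# Editor's note: the ord() - 32 trick above works because lowercase ASCII
# letters sit exactly 32 code points above their uppercase counterparts
# (ord('a') == 97, ord('A') == 65); non-lowercase characters pass through,
# so "hello, World 1" becomes "HELLO, WORLD 1".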
| 282 | 0 |
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :str ) -> list:
if n_term == "":
return []
__lowerCAmelCase : list = []
for temp in range(int(SCREAMING_SNAKE_CASE ) ):
series.append(F'''1/{temp + 1}''' if series else """1""" )
return series
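# Editor's note: a worked example of the output format (the masked list
# variable is `series` as used in the loop above): an input of "5" yields
# ["1", "1/2", "1/3", "1/4", "1/5"].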
if __name__ == "__main__":
_UpperCAmelCase = input('Enter the last number (nth term) of the Harmonic Series')
print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
    print(harmonic_series(nth_term))
| 718 |
from math import asin, atan, cos, radians, sin, sqrt, tan
_UpperCAmelCase = 6_378_137.0
_UpperCAmelCase = 6_356_752.314_245
_UpperCAmelCase = 637_8137
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE :float , SCREAMING_SNAKE_CASE :float , SCREAMING_SNAKE_CASE :float , SCREAMING_SNAKE_CASE :float ) -> float:
__lowerCAmelCase : List[Any] = (AXIS_A - AXIS_B) / AXIS_A
__lowerCAmelCase : Tuple = atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE ) ) )
__lowerCAmelCase : str = atan((1 - flattening) * tan(radians(SCREAMING_SNAKE_CASE ) ) )
__lowerCAmelCase : int = radians(SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = radians(SCREAMING_SNAKE_CASE )
# Equation
__lowerCAmelCase : List[str] = sin((phi_a - phi_a) / 2 )
__lowerCAmelCase : Optional[int] = sin((lambda_a - lambda_a) / 2 )
# Square both values
sin_sq_phi *= sin_sq_phi
sin_sq_lambda *= sin_sq_lambda
__lowerCAmelCase : Optional[Any] = sqrt(sin_sq_phi + (cos(SCREAMING_SNAKE_CASE ) * cos(SCREAMING_SNAKE_CASE ) * sin_sq_lambda) )
return 2 * RADIUS * asin(SCREAMING_SNAKE_CASE )
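# Editor's note: a worked example, assuming the masked parameters are
# (lat1, lon1, lat2, lon2) in decimal degrees. For San Francisco
# (37.774856, -122.424227) to Yosemite (37.864742, -119.537521) the
# great-circle distance evaluates to roughly 254 km.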
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 240 | 0 |
"""simple docstring"""
def A__ ( A__ = 5000_0000 ) -> Optional[int]:
'''simple docstring'''
_UpperCAmelCase = set()
_UpperCAmelCase = int((limit - 24) ** (1 / 2) )
_UpperCAmelCase = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , __lowercase ) ) )
for primea in primes:
_UpperCAmelCase = primea * primea
for primea in primes:
_UpperCAmelCase = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
_UpperCAmelCase = primea * primea * primea * primea
_UpperCAmelCase = square + cube + tetr
if total >= limit:
break
ret.add(__lowercase )
return len(__lowercase )
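# Editor's note: a sanity check for the sieve above. The smallest number
# expressible as prime_square + prime_cube + prime_fourth is
# 28 = 2**2 + 2**3 + 2**4, so calling the function with limit 29 returns 1.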
if __name__ == "__main__":
print(f'''{solution() = }''')
| 426 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    """T5-style encoder over note tokens."""

    @register_to_config
    def __init__(
        self,
        max_length: int,
        vocab_size: int,
        d_model: int,
        dropout_rate: float,
        num_layers: int,
        num_heads: int,
        d_kv: int,
        d_ff: int,
        feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)

        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False

        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size,
            d_model=d_model,
            num_heads=num_heads,
            d_kv=d_kv,
            d_ff=d_ff,
            dropout_rate=dropout_rate,
            feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder,
            is_encoder_decoder=False,
        )

        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = T5Block(t5config)
            self.encoders.append(lyr)

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)

        x = self.dropout_pre(x)

        # invert the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
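# Usage sketch (added). Because this module uses relative imports it only runs
# inside its package; the tiny config below is an illustrative assumption, not
# the configuration of any released checkpoint.
#
#     encoder = SpectrogramNotesEncoder(
#         max_length=64, vocab_size=128, d_model=32, dropout_rate=0.1,
#         num_layers=1, num_heads=2, d_kv=16, d_ff=64,
#         feed_forward_proj="gated-gelu", is_decoder=False,
#     )
#     tokens = torch.randint(0, 128, (1, 64))
#     mask = torch.ones(1, 64, dtype=torch.long)
#     hidden, out_mask = encoder(tokens, mask)  # hidden: (1, 64, 32)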
| 558 | 0 |
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)

AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    # The shard filenames encode their sample count as "-<shard>-<count>.tfrecord".
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")

    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)

    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
main(args)
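# Example invocation (added; the script name and bucket paths are placeholders,
# not real resources):
#
#   python run_mlm_tpu.py \
#       --tokenizer unigram-tokenizer-wikitext \
#       --train_dataset gs://my-bucket/train \
#       --eval_dataset gs://my-bucket/eval \
#       --output_dir gs://my-bucket/output \
#       --bfloat16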
| 709 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
    PegasusTokenizer = None
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
"tokenizer_file": {
"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/pegasus-xsum": 512,
}
class PegasusTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" PEGASUS tokenizer backed by HuggingFace's tokenizers library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = PegasusTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        mask_token="<mask_2>",
        mask_token_sent="<mask_1>",
        additional_special_tokens=None,
        offset=103,
        **kwargs,
    ):
        self.offset = offset

        if additional_special_tokens is not None:
            if not isinstance(additional_special_tokens, list):
                raise TypeError(
                    f"additional_special_tokens should be of type {type(list)}, but is"
                    f" {type(additional_special_tokens)}"
                )

            additional_special_tokens_extended = (
                ([mask_token_sent] + additional_special_tokens)
                if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
                else additional_special_tokens
            )
            # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
            additional_special_tokens_extended += [
                f"<unk_{i}>" for i in range(len(additional_special_tokens_extended), self.offset - 1)
            ]

            if len(set(additional_special_tokens_extended)) != len(additional_special_tokens_extended):
                raise ValueError(
                    "Please make sure that the provided additional_special_tokens do not contain an incorrectly"
                    f" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}."
                )
            additional_special_tokens = additional_special_tokens_extended
        else:
            additional_special_tokens = [mask_token_sent] if mask_token_sent is not None else []
            additional_special_tokens += [f"<unk_{i}>" for i in range(2, self.offset)]

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            pad_token=pad_token,
            eos_token=eos_token,
            unk_token=unk_token,
            mask_token=mask_token,
            mask_token_sent=mask_token_sent,
            offset=offset,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def _special_token_mask(self, seq):
        all_special_ids = set(self.all_special_ids)  # call it once instead of inside list comp
        all_special_ids.remove(self.unk_token_id)  # <unk> is only sometimes special

        if all_special_ids != set(range(len(self.additional_special_tokens) + 3)):
            raise ValueError(
                "There should be 3 special tokens: mask_token, pad_token, and eos_token +"
                f" {len(self.additional_special_tokens)} additional_special_tokens, but got {all_special_ids}"
            )

        return [1 if x in all_special_ids else 0 for x in seq]

    def get_special_tokens_mask(
        self, token_ids_0: List, token_ids_1: Optional[List] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Get list where entries are [1] if a token is [eos] or [pad] else 0."""
        if already_has_special_tokens:
            return self._special_token_mask(token_ids_0)
        elif token_ids_1 is None:
            return self._special_token_mask(token_ids_0) + [1]
        else:
            return self._special_token_mask(token_ids_0 + token_ids_1) + [1]

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
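# Usage sketch (added; downloads the pretrained files, so it needs network access):
#
#     tokenizer = PegasusTokenizerFast.from_pretrained("google/pegasus-xsum")
#     batch = tokenizer(["The quick brown fox."], return_tensors="pt")
#     print(batch["input_ids"].shape)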
| 243 | 0 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
_DESCRIPTION = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`List[float]`): Predicted labels, as returned by a model.\n    references (`List[float]`): Ground truth labels.\n    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n        only the spearmanr score. Defaults to `False`.\nReturns:\n    spearmanr (`float`): Spearman correlation coefficient.\n    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n    Example 1:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n        >>> print(results)\n        {\'spearmanr\': -0.7}\n\n    Example 2:\n        >>> spearmanr_metric = datasets.load_metric("spearmanr")\n        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n        ...                                    predictions=[10, 9, 2.5, 6, 4],\n        ...                                    return_pvalue=True)\n        >>> print(results[\'spearmanr\'])\n        -0.7\n        >>> print(round(results[\'spearmanr_pvalue\'], 2))\n        0.19\n'
_CITATION = r'\n@book{kokoska2000crc,\n  title={CRC standard probability and statistics tables and formulae},\n  author={Kokoska, Stephen and Zwillinger, Daniel},\n  year={2000},\n  publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n             Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n             Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n             Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n             Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n             Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n             Kern, Robert and Larson, Eric and Carey, C J and\n             Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n             {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n             Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n             Harris, Charles R. and Archibald, Anne M. and\n             Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n             {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n             Computing in Python}},\n  journal = {Nature Methods},\n  year    = {2020},\n  volume  = {17},\n  pages   = {261--272},\n  adsurl  = {https://rdcu.be/b08Wh},\n  doi     = {10.1038/s41592-019-0686-2},\n}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 365 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"


class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5


@dataclass
class FlaxSchedulerOutput(BaseOutput):
    """Base class for the output of a scheduler's step function."""

    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    """Mixin containing common functions for Flax schedulers."""

    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True

    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path: Optional[Union[str, Dict[str, Any]]] = None,
        subfolder: Optional[str] = None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        """Returns all schedulers that are compatible with this scheduler."""
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes


def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1-beta) over time from t = [0, 1].
    """

    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)


@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(
            alphas=alphas,
            betas=betas,
            alphas_cumprod=alphas_cumprod,
        )


def get_sqrt_alpha_prod(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
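# Minimal sketch (added) of how these helpers compose. The scheduler stand-in is
# an illustrative assumption, not a real diffusers scheduler, and the module's
# relative imports mean this only runs inside its package.
#
#     from types import SimpleNamespace
#
#     fake_scheduler = SimpleNamespace(
#         dtype=jnp.float32,
#         config=SimpleNamespace(
#             trained_betas=None, beta_schedule="linear",
#             beta_start=0.0001, beta_end=0.02, num_train_timesteps=1000,
#         ),
#     )
#     state = CommonSchedulerState.create(fake_scheduler)
#     noisy = add_noise_common(state, jnp.ones((1, 4)), jnp.zeros((1, 4)), jnp.array([10]))
#     print(noisy.shape)  # (1, 4)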
'''simple docstring'''
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in the original FLAVA checkpoint
    return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace('heads.cmd.mim_head.cls.predictions', 'mmm_image_head')
        key = key.replace('heads.cmd.mlm_head.cls.predictions', 'mmm_text_head')
        key = key.replace('heads.cmd.itm_head.cls', 'itm_head')
        key = key.replace('heads.cmd.itm_head.pooler', 'itm_head.pooler')
        key = key.replace('heads.cmd.clip_head.logit_scale', 'flava.logit_scale')
        key = key.replace('heads.fairseq_mlm.cls.predictions', 'mlm_head')
        key = key.replace('heads.imagenet.mim_head.cls.predictions', 'mim_head')
        key = key.replace('mm_text_projection', 'flava.text_to_mm_projection')
        key = key.replace('mm_image_projection', 'flava.image_to_mm_projection')
        key = key.replace('image_encoder.module', 'flava.image_model')
        key = key.replace('text_encoder.module', 'flava.text_model')
        key = key.replace('mm_encoder.module.encoder.cls_token', 'flava.multimodal_model.cls_token')
        key = key.replace('mm_encoder.module', 'flava.multimodal_model')
        key = key.replace('text_projection', 'flava.text_projection')
        key = key.replace('image_projection', 'flava.image_projection')

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """
    Copy/paste/tweak model's weights to the transformers design.
    """
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location='cpu')
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location='cpu')

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to flava checkpoint''')
parser.add_argument('''--codebook_path''', default=None, type=str, help='''Path to flava codebook checkpoint''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
| 711 |
'''simple docstring'''
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request('GET', 'https://huggingface.co')
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request('GET', 'https://huggingface.co', timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request('GET', 'https://huggingface.co')


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head('https://huggingface.co')
| 434 | 0 |
print((lambda quine: quine % quine)('print((lambda quine: quine %% quine)(%r))'))
| 101 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class EncodecFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24000,
        padding_value: float = 0.0,
        chunk_length_s: Optional[float] = None,
        overlap: Optional[float] = None,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        if padding and truncation:
            raise ValueError("Both padding and truncation were set. Make sure you only set one.")
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f"Expected input shape (channels, length) but got shape {example.shape}")
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f"Expected mono audio but example has {example.shape[-1]} channels")
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f"Expected stereo audio but example has {example.shape[-1]} channels")

        padded_inputs = None
        input_values = BatchFeature({"input_values": raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = "max_length"
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                padded_inputs["padding_mask"] = padded_inputs.pop("attention_mask")

        input_values = []
        for example in padded_inputs.pop("input_values"):
            if self.feature_size == 1:
                example = example[..., None]
            input_values.append(example.T)

        padded_inputs["input_values"] = input_values
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
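# Usage sketch (added; the defaults assume 24 kHz mono audio, and the module's
# relative imports mean this only runs inside its package):
#
#     extractor = EncodecFeatureExtractor()
#     audio = np.zeros(24000, dtype=np.float32)  # one second of silence
#     features = extractor(audio, sampling_rate=24000, return_tensors="np")
#     print(features["input_values"].shape)  # (1, 1, 24000)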
| 693 | 0 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
def skip(test_case):
    "Decorator that skips a test unconditionally"
    return unittest.skip("Test was skipped")(test_case)


def slow(test_case):
    return unittest.skipUnless(_run_slow_tests, "test is slow")(test_case)


def require_cpu(test_case):
    return unittest.skipUnless(not torch.cuda.is_available(), "test requires only a CPU")(test_case)


def require_cuda(test_case):
    return unittest.skipUnless(torch.cuda.is_available(), "test requires a GPU")(test_case)


def require_xpu(test_case):
    return unittest.skipUnless(is_xpu_available(), "test requires a XPU")(test_case)


def require_mps(test_case):
    return unittest.skipUnless(is_mps_available(), "test requires a `mps` backend support in `torch`")(test_case)


def require_huggingface_suite(test_case):
    return unittest.skipUnless(
        is_transformers_available() and is_datasets_available(), "test requires the Hugging Face suite"
    )(test_case)


def require_bnb(test_case):
    return unittest.skipUnless(is_bnb_available(), "test requires the bitsandbytes library")(test_case)


def require_tpu(test_case):
    return unittest.skipUnless(is_tpu_available(), "test requires TPU")(test_case)


def require_single_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() == 1, "test requires a GPU")(test_case)


def require_single_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() == 1, "test requires a XPU")(test_case)


def require_multi_gpu(test_case):
    return unittest.skipUnless(torch.cuda.device_count() > 1, "test requires multiple GPUs")(test_case)


def require_multi_xpu(test_case):
    return unittest.skipUnless(torch.xpu.device_count() > 1, "test requires multiple XPUs")(test_case)


def require_safetensors(test_case):
    return unittest.skipUnless(is_safetensors_available(), "test requires safetensors")(test_case)


def require_deepspeed(test_case):
    return unittest.skipUnless(is_deepspeed_available(), "test requires DeepSpeed")(test_case)


def require_fsdp(test_case):
    return unittest.skipUnless(is_torch_version(">=", "1.12.0"), "test requires torch version >= 1.12.0")(test_case)


def require_torch_min_version(test_case=None, version=None):
    if test_case is None:
        return partial(require_torch_min_version, version=version)
    return unittest.skipUnless(is_torch_version(">=", version), f"test requires torch version >= {version}")(test_case)


def require_tensorboard(test_case):
    return unittest.skipUnless(is_tensorboard_available(), "test requires Tensorboard")(test_case)


def require_wandb(test_case):
    return unittest.skipUnless(is_wandb_available(), "test requires wandb")(test_case)


def require_comet_ml(test_case):
    return unittest.skipUnless(is_comet_ml_available(), "test requires comet_ml")(test_case)


_atleast_one_tracker_available = (
    any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)


def require_trackers(test_case):
    return unittest.skipUnless(
        _atleast_one_tracker_available,
        "test requires at least one tracker to be available and for `comet_ml` to not be installed",
    )(test_case)
class TempDirTestCase(unittest.TestCase):
    clear_on_setup = True

    @classmethod
    def setUpClass(cls):
        "Creates a `tempfile.TemporaryDirectory` and stores it in `cls.tmpdir`"
        cls.tmpdir = tempfile.mkdtemp()

    @classmethod
    def tearDownClass(cls):
        "Remove `cls.tmpdir` after the test suite has finished"
        if os.path.exists(cls.tmpdir):
            shutil.rmtree(cls.tmpdir)

    def setUp(self):
        "Destroy all contents in `self.tmpdir`, but not `self.tmpdir` itself"
        if self.clear_on_setup:
            for path in Path(self.tmpdir).glob("**/*"):
                if path.is_file():
                    path.unlink()
                elif path.is_dir():
                    shutil.rmtree(path)


class AccelerateTestCase(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        # Reset the state of the AcceleratorState singleton.
        AcceleratorState._reset_state()
        PartialState._reset_state()


class MockingTestCase(unittest.TestCase):
    def add_mocks(self, mocks: Union[mock.Mock, List[mock.Mock]]):
        self.mocks = mocks if isinstance(mocks, (tuple, list)) else [mocks]
        for m in self.mocks:
            m.start()
            self.addCleanup(m.stop)
def are_the_same_tensors(tensor):
    state = AcceleratorState()
    tensor = tensor[None].clone().to(state.device)
    tensors = gather(tensor).cpu()
    tensor = tensor[0].cpu()
    for i in range(tensors.shape[0]):
        if not torch.equal(tensors[i], tensor):
            return False
    return True
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            asyncio.create_task(_read_stream(p.stdout, lambda x: tee(x, out, sys.stdout, label="stdout:"))),
            asyncio.create_task(_read_stream(p.stderr, lambda x: tee(x, err, sys.stderr, label="stderr:"))),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    return result


class SubprocessCallException(Exception):
    pass


def run_command(command, return_stdout=False):
    try:
        output = subprocess.check_output(command, stderr=subprocess.STDOUT)
        if return_stdout:
            if hasattr(output, "decode"):
                output = output.decode("utf-8")
            return output
    except subprocess.CalledProcessError as e:
        raise SubprocessCallException(
            f"Command `{' '.join(command)}` failed with the following error:\n\n{e.output.decode()}"
        ) from e
| 15 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class XLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_xlm_roberta_base(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-base")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 768))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))

    @slow
    def test_xlm_roberta_large(self):
        model = XLMRobertaModel.from_pretrained("xlm-roberta-large")
        input_ids = torch.tensor([[0, 581, 10269, 83, 99942, 136, 60742, 23, 70, 80583, 18276, 2]])
        # The dog is cute and lives in the garden house
        expected_output_shape = torch.Size((1, 12, 1024))  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = torch.tensor(
            [[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]]
        )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            output = model(input_ids)["last_hidden_state"].detach()
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
| 15 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
["attention", "attn"],
["encoder_attention", "encoder_attn"],
["q_lin", "q_proj"],
["k_lin", "k_proj"],
["v_lin", "v_proj"],
["out_lin", "out_proj"],
["norm_embeddings", "layernorm_embedding"],
["position_embeddings", "embed_positions"],
["embeddings", "embed_tokens"],
["ffn.lin", "fc"],
]
def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v
__UpperCamelCase = ["START"]
@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """
    Copy/paste/tweak a ParlAI Blenderbot checkpoint into the transformers design.
    """
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BlenderbotConfig.from_json_file(config_json_path)
    m = BlenderbotForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
parser.add_argument(
"--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
)
    args = parser.parse_args()
convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
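# Example invocation (added; the script and file names are placeholders):
#
#   python convert_blenderbot_checkpoint.py \
#       --src_path blenderbot-model.bin \
#       --save_dir hf_blenderbot \
#       --hf_config_json blenderbot-3b-config.json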
| 26 |
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol

    return largest
if __name__ == "__main__":
print(F'''{solution() = }''')
| 336 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }
@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 460 |
'''simple docstring'''
from torch import nn
def get_activation(act_fn: str) -> nn.Module:
    """Map an activation-function name to the corresponding nn.Module."""
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
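# Usage sketch (added):
if __name__ == "__main__":
    import torch

    act = get_activation("silu")
    print(act(torch.ones(2)))  # tensor([0.7311, 0.7311])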
| 460 | 1 |
import numpy as np
def UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ = 1E-12 , lowercase_ = 1_00 , ) -> tuple[float, np.ndarray]:
'''simple docstring'''
assert np.shape(lowercase_ )[0] == np.shape(lowercase_ )[1]
# Ensure proper dimensionality.
assert np.shape(lowercase_ )[0] == np.shape(lowercase_ )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(lowercase_ ) == np.iscomplexobj(lowercase_ )
lowercase__ : Dict = np.iscomplexobj(lowercase_ )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(lowercase_ , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
lowercase__ : Optional[Any] = False
lowercase__ : Union[str, Any] = 0
lowercase__ : Tuple = 0
lowercase__ : Dict = 1E12
while not convergence:
# Multiple matrix by the vector.
lowercase__ : Tuple = np.dot(lowercase_ , lowercase_ )
# Normalize the resulting output vector.
lowercase__ : int = w / np.linalg.norm(lowercase_ )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
lowercase__ : List[Any] = vector.conj().T if is_complex else vector.T
lowercase__ : Optional[Any] = np.dot(lowercase_ , np.dot(lowercase_ , lowercase_ ) )
# Check convergence.
lowercase__ : Union[str, Any] = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
lowercase__ : Any = True
lowercase__ : List[str] = lambda_
if is_complex:
lowercase__ : Tuple = np.real(lambda_ )
return lambda_, vector
def test_power_iteration() -> None:
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)
        # Numpy implementation: get eigenvalues and eigenvectors using the
        # built-in eigh (used for symmetric or Hermitian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # The last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # The last column is the eigenvector corresponding to the largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy give close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Compare absolute values element-wise, as eigenvectors
        # are only unique up to sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    test_power_iteration()
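# Convergence is governed by the spectral gap: the error shrinks roughly like
# |lambda_2 / lambda_1| ** k after k iterations. A minimal extra check:
if __name__ == "__main__":
    demo_matrix = np.array([[2.0, 0.0], [0.0, 1.0]])
    top_eigenvalue, _ = power_iteration(demo_matrix, np.array([1.0, 1.0]))
    print(top_eigenvalue)  # ~2.0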
| 12 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        # Extra 5-way head for the Natural Questions answer-category labels.
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule


def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels):
    def cross_entropy(logits, labels, reduction=None):
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooled_labels)
    return (start_loss + end_loss + pooled_loss) / 3
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3_000
    save_steps: int = 10_500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20_000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4_096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)


@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels,
        )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng


@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics


class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None

    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__,
            params=params,
            tx=tx,
            loss_fn=calculate_loss_for_nq,
        )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step,
                apply_fn=model.__call__,
                params=params,
                tx=tx,
                opt_state=opt_state,
            )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state

    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)

    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i

    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")


def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())
    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())
    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))
    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]
    print("DONE")
    return params, opt_state, step, args, data_collator


def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr


def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k, v in params.items()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
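# A small smoke test of the combined NQ loss above (dummy logits and labels;
# needs only the jax/jnp imports already present in this file):
if __name__ == "__main__":
    demo_rng = jax.random.PRNGKey(0)
    demo_logits = jax.random.normal(demo_rng, (2, 8))
    demo_labels = jnp.array([1, 3])
    print(
        calculate_loss_for_nq(
            demo_logits, demo_labels, demo_logits, demo_labels, demo_logits[:, :5], jnp.array([0, 2])
        )
    )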
| 12 | 1 |
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(
                    predictions=predictions,
                    references=references,
                )

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
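# `find_executable_batch_size` retries the wrapped function with a halved batch
# size whenever it hits an out-of-memory error. A minimal sketch of the call
# convention (no OOM is forced here, so it succeeds on the first try):
if __name__ == "__main__":
    @find_executable_batch_size(starting_batch_size=64)
    def probe(batch_size):
        # On a real OOM, the decorator frees memory, halves `batch_size`,
        # and re-invokes this function.
        return batch_size

    print(probe())  # -> 64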
| 708 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_mobilevit""": ["""MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MobileViTConfig""", """MobileViTOnnxConfig"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase = ["""MobileViTFeatureExtractor"""]
__UpperCAmelCase = ["""MobileViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
"""MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MobileViTForImageClassification""",
"""MobileViTForSemanticSegmentation""",
"""MobileViTModel""",
"""MobileViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
"""TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFMobileViTForImageClassification""",
"""TFMobileViTForSemanticSegmentation""",
"""TFMobileViTModel""",
"""TFMobileViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
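# For reference, the same deferred-import behavior can be sketched with PEP 562's
# module-level __getattr__ (a simplified stand-in, not the actual _LazyModule code):
#
#     import importlib
#
#     def __getattr__(name):
#         for module, symbols in _import_structure.items():
#             if name in symbols:
#                 return getattr(importlib.import_module("." + module, __name__), name)
#         raise AttributeError(name)
#
# Either way, importing the package stays cheap and the torch/TF backends are
# only loaded when one of their symbols is actually accessed.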
| 218 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """Given two of (inductance, frequency, reactance), with the unknown one
    passed as 0, solve for the missing quantity via X_L = 2 * pi * f * L."""
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 505 |
'''simple docstring'''
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class CpmAntTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CpmAntTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        vocab_tokens = [
'<d>',
'</d>',
'<s>',
'</s>',
'</_>',
'<unk>',
'<pad>',
'</n>',
'我',
'是',
'C',
'P',
'M',
'A',
'n',
't',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
@tooslow
    def test_pre_tokenization(self):
        tokenizer = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b')
        texts = '今天天气真好!'
        jieba_tokens = ['今天', '天气', '真', '好', '!']
        tokens = tokenizer.tokenize(texts)
        self.assertListEqual(tokens, jieba_tokens)

        normalized_text = '今天天气真好!'
        input_tokens = [tokenizer.bos_token] + tokens
        input_jieba_tokens = [6, 9802, 14962, 2082, 831, 244]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_jieba_tokens)

        reconstructed_text = tokenizer.decode(input_tokens)
        self.assertEqual(reconstructed_text, normalized_text)
| 476 | 0 |
"""simple docstring"""
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "snap-research/efficientformer-l1-300": (
        "https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json"
    ),
}
class EfficientFormerConfig(PretrainedConfig):
    model_type = "efficientformer"

    def __init__(
        self,
        depths: List[int] = [3, 2, 6, 4],
        hidden_sizes: List[int] = [48, 96, 224, 448],
        downsamples: List[bool] = [True, True, True, True],
        dim: int = 448,
        key_dim: int = 32,
        attention_ratio: int = 4,
        resolution: int = 7,
        num_hidden_layers: int = 5,
        num_attention_heads: int = 8,
        mlp_expansion_ratio: int = 4,
        hidden_dropout_prob: float = 0.0,
        patch_size: int = 16,
        num_channels: int = 3,
        pool_size: int = 3,
        downsample_patch_size: int = 3,
        downsample_stride: int = 2,
        downsample_pad: int = 1,
        drop_path_rate: float = 0.0,
        num_meta3d_blocks: int = 1,
        distillation: bool = True,
        use_layer_scale: bool = True,
        layer_scale_init_value: float = 1e-5,
        hidden_act: str = "gelu",
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        image_size: int = 224,
        batch_norm_eps: float = 1e-05,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.hidden_sizes = hidden_sizes
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.depths = depths
        self.mlp_expansion_ratio = mlp_expansion_ratio
        self.downsamples = downsamples
        self.dim = dim
        self.key_dim = key_dim
        self.attention_ratio = attention_ratio
        self.resolution = resolution
        self.pool_size = pool_size
        self.downsample_patch_size = downsample_patch_size
        self.downsample_stride = downsample_stride
        self.downsample_pad = downsample_pad
        self.drop_path_rate = drop_path_rate
        self.num_meta3d_blocks = num_meta3d_blocks
        self.distillation = distillation
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.image_size = image_size
        self.batch_norm_eps = batch_norm_eps
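# A minimal instantiation sketch (all values shown are the defaults defined above):
if __name__ == "__main__":
    config = EfficientFormerConfig(hidden_sizes=[48, 96, 224, 448], num_meta3d_blocks=1)
    print(config.model_type, config.depths, config.image_size)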
| 150 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE_(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
def A__(*args, **kwargs):
    requires_backends(A__, ["torch"])
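# These dummies exist so that top-level imports keep working when torch is
# absent: touching a placeholder raises a clear ImportError instead of a
# NameError. The real file defines one such dummy per torch-backed class and
# function. A self-contained toy version of the same pattern:
if __name__ == "__main__":
    class DemoDummyMeta(type):
        def __getattr__(cls, name):
            raise ImportError(f"{cls.__name__}.{name} requires the torch backend")

    class DemoModel(metaclass=DemoDummyMeta):
        def __init__(self, *args, **kwargs):
            raise ImportError("DemoModel requires the torch backend")

    try:
        DemoModel()
    except ImportError as err:
        print(err)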
| 150 | 1 |
'''simple docstring'''
MOD_ADLER = 65_521


def adler32(plain_text: str) -> int:
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
| 494 |
'''simple docstring'''
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    """simple docstring"""

    # warning at import time: this module only exists for backward compatibility
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
| 494 | 1 |
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the fractional part of `number`, rounded to `digit_amount` digits
    (left unrounded when digit_amount is 0)."""
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
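    # A hedged note on the expected output: with digit_amount > 0 the result
    # is rounded (35.345 -> 0.3, 0.34, 0.345; -14.789 -> -0.789), while
    # digit_amount == 0 returns the raw float difference, so binary
    # floating-point noise can leak through, e.g. decimal_isolate(1.53, 0)
    # prints 0.5299999999999998 rather than 0.53. int() truncates toward
    # zero, so negative inputs keep a negative fractional part.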
| 708 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
A__ = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint) -> None:
    """Compare `got_ver` to `want_ver` with operator `op`, raising on mismatch."""
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )


def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """Check that the installed package satisfies a pip-style requirement string."""
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)


def require_version_core(requirement):
    """require_version wrapper which emits a core-specific hint on failure."""
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
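# Usage sketch (hedged: the version pins below are illustrative, not canonical):
# require_version("tokenizers>=0.11.1,!=0.11.3,<0.14", "To fix: pip install -U tokenizers")
# require_version_core("protobuf")  # raises PackageNotFoundError when the package is absent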
| 219 | 0 |
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError('This matrix has no inverse.')

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError('This matrix has no inverse.')

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError('Please provide a matrix of size 2x2 or 3x3.')
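# Worked example (a hedged sketch, not executed by this module): for
# [[2.0, 5.0], [1.0, 3.0]] the determinant is 2*3 - 1*5 = 1, so
# inverse_of_matrix([[2.0, 5.0], [1.0, 3.0]]) returns [[3.0, -5.0], [-1.0, 2.0]],
# and multiplying the two matrices back together yields the 2x2 identity.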
| 66 |
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
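    # Example (hedged): prime_sieve(30) returns
    # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29] -- primes up to sqrt(num) are
    # appended in the main sieve loop, the remaining unmarked values in the
    # final pass, so the result comes out in ascending order.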
| 66 | 1 |
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class lowercase ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
snake_case_ = BlenderbotSmallTokenizer
snake_case_ = False
def _UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
super().setUp()
lowerCamelCase__ = ["""__start__""", """adapt""", """act""", """ap@@""", """te""", """__end__""", """__unk__"""]
lowerCamelCase__ = dict(zip(a_ , range(len(a_ ) ) ) )
lowerCamelCase__ = ["""#version: 0.2""", """a p""", """t e</w>""", """ap t</w>""", """a d""", """ad apt</w>""", """a c""", """ac t</w>""", """"""]
lowerCamelCase__ = {"""unk_token""": """__unk__""", """bos_token""": """__start__""", """eos_token""": """__end__"""}
lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCamelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(a_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(a_ ) )
def _UpperCamelCase ( self : Optional[Any] , **a_ : Union[str, Any] ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **a_ )
def _UpperCamelCase ( self : List[Any] , a_ : Dict ):
"""simple docstring"""
lowerCamelCase__ = """adapt act apte"""
lowerCamelCase__ = """adapt act apte"""
return input_text, output_text
def _UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCamelCase__ = """adapt act apte"""
lowerCamelCase__ = ["""adapt""", """act""", """ap@@""", """te"""]
lowerCamelCase__ = tokenizer.tokenize(a_ )
self.assertListEqual(a_ , a_ )
lowerCamelCase__ = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
lowerCamelCase__ = [0, 1, 2, 3, 4, 5]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , a_ )
def _UpperCamelCase ( self : Tuple ):
"""simple docstring"""
lowerCamelCase__ = BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
assert tok("""sam""" ).input_ids == [13_84]
lowerCamelCase__ = """I am a small frog."""
lowerCamelCase__ = tok([src_text] , padding=a_ , truncation=a_ )["""input_ids"""]
lowerCamelCase__ = tok.batch_decode(a_ , skip_special_tokens=a_ , clean_up_tokenization_spaces=a_ )[0]
assert src_text != decoded # I wish it did!
assert decoded == "i am a small frog ."
def _UpperCamelCase ( self : int ):
"""simple docstring"""
lowerCamelCase__ = BlenderbotSmallTokenizer.from_pretrained("""facebook/blenderbot-90M""" )
lowerCamelCase__ = """I am a small frog ."""
lowerCamelCase__ = """."""
lowerCamelCase__ = tok(a_ )["""input_ids"""]
lowerCamelCase__ = tok(a_ )["""input_ids"""]
assert encoded[-1] == encoded_dot[0]
| 235 |
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class CNNDMDataset(Dataset):
    """Abstracts the CNN/DailyMail dataset: documents are listed up front and
    read lazily, since the raw dataset is too large to hold in memory."""

    def __init__(self, path="", prefix="train"):
        """We initialize the class by listing all the documents to summarize."""
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        """Returns the number of documents."""
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines


def process_story(raw_story):
    """Split a raw story file into article lines and summary lines."""
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines


def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."


def truncate_or_pad(sequence, block_size, pad_token_id):
    """Adapt the sequence length to the block size: truncate on the right, or
    append padding tokens until the block size is reached."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence


def build_mask(sequence, pad_token_id):
    """Build the attention mask: 1 for real tokens, 0 for padding."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask


def encode_for_summarization(story_lines, summary_lines, tokenizer):
    """Encode story and summary lines, flattening sentences into one id list each."""
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids


def compute_token_type_ids(batch, separator_token_id):
    """Segment embeddings alternate 0/1 per sentence, split on the separator token."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
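# Minimal usage sketch (hedged, commented out):
# raw = "First sentence\nSecond sentence\n@highlight\nThe summary"
# story, summary = process_story(raw)
# story == ["First sentence.", "Second sentence."] and summary == ["The summary."]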
| 235 | 1 |
'''simple docstring'''
import json
import sys
def format_json_to_md(input_json_file, output_md_file) -> None:
    with open(input_json_file, encoding='utf-8') as f:
        results = json.load(f)

    output_md = ['<details>', '<summary>Show updated benchmarks!</summary>', ' ']

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]

        benchmark_file_name = benchmark_name.split('/')[-1]
        output_md.append(f'### Benchmark: {benchmark_file_name}')

        title = '| metric |'
        lines = '|--------|'
        value = '| new / old (diff) |'
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals['new']
            old_val = metric_vals.get('old', None)
            dif_val = metric_vals.get('diff', None)

            val_str = f' {new_val:f}' if isinstance(new_val, (int, float)) else 'None'
            if old_val is not None:
                val_str += f' / {old_val:f}' if isinstance(old_val, (int, float)) else 'None'
            if dif_val is not None:
                val_str += f' ({dif_val:f})' if isinstance(dif_val, (int, float)) else 'None'

            title += ' ' + metric_name + ' |'
            lines += '---|'
            value += val_str + ' |'

        output_md += [title, lines, value, ' ']

    output_md.append('</details>')

    with open(output_md_file, 'w', encoding='utf-8') as f:
        f.writelines('\n'.join(output_md))
if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
format_json_to_md(input_json_file, output_md_file)
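    # Expected input shape (a hedged sketch): a JSON object keyed by benchmark
    # path, each mapping metric names to {"new": ..., "old": ..., "diff": ...},
    # e.g. {"benchmarks/b1.json": {"load_time": {"new": 1.2, "old": 1.5, "diff": -0.3}}}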
| 42 |
'''simple docstring'''
import pickle
import numpy as np
from matplotlib import pyplot as plt
class UpperCAmelCase :
def __init__( self :Dict , lowercase_ :Union[str, Any] , lowercase_ :Tuple , lowercase_ :List[Any] , lowercase_ :List[str] , lowercase_ :Dict , lowercase_ :Tuple=0.2 , lowercase_ :List[str]=0.2 )-> Any:
A__ = bp_numa
A__ = bp_numa
A__ = bp_numa
A__ = conva_get[:2]
A__ = conva_get[2]
A__ = size_pa
A__ = rate_w
A__ = rate_t
A__ = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
A__ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
A__ = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
A__ = -2 * np.random.rand(self.conva[1] ) + 1
A__ = -2 * np.random.rand(self.num_bpa ) + 1
A__ = -2 * np.random.rand(self.num_bpa ) + 1
def UpperCAmelCase_ ( self :Optional[Any] , lowercase_ :Optional[Any] )-> Optional[Any]:
# save model dict with pickle
A__ = {
"num_bp1": self.num_bpa,
"num_bp2": self.num_bpa,
"num_bp3": self.num_bpa,
"conv1": self.conva,
"step_conv1": self.step_conva,
"size_pooling1": self.size_poolinga,
"rate_weight": self.rate_weight,
"rate_thre": self.rate_thre,
"w_conv1": self.w_conva,
"wkj": self.wkj,
"vji": self.vji,
"thre_conv1": self.thre_conva,
"thre_bp2": self.thre_bpa,
"thre_bp3": self.thre_bpa,
}
with open(lowercase_ , "wb" ) as f:
pickle.dump(lowercase_ , lowercase_ )
print(F"Model saved: {save_path}" )
@classmethod
def UpperCAmelCase_ ( cls :Any , lowercase_ :List[Any] )-> Optional[Any]:
# read saved model
with open(lowercase_ , "rb" ) as f:
A__ = pickle.load(lowercase_ ) # noqa: S301
A__ = model_dic.get("conv1" )
conv_get.append(model_dic.get("step_conv1" ) )
A__ = model_dic.get("size_pooling1" )
A__ = model_dic.get("num_bp1" )
A__ = model_dic.get("num_bp2" )
A__ = model_dic.get("num_bp3" )
A__ = model_dic.get("rate_weight" )
A__ = model_dic.get("rate_thre" )
# create model instance
A__ = CNN(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# modify model parameter
A__ = model_dic.get("w_conv1" )
A__ = model_dic.get("wkj" )
A__ = model_dic.get("vji" )
A__ = model_dic.get("thre_conv1" )
A__ = model_dic.get("thre_bp2" )
A__ = model_dic.get("thre_bp3" )
return conv_ins
def UpperCAmelCase_ ( self :Dict , lowercase_ :Optional[int] )-> Optional[Any]:
return 1 / (1 + np.exp(-1 * x ))
def UpperCAmelCase_ ( self :List[Any] , lowercase_ :Optional[Any] )-> Any:
return round(lowercase_ , 3 )
def UpperCAmelCase_ ( self :int , lowercase_ :Tuple , lowercase_ :List[Any] , lowercase_ :Tuple , lowercase_ :Union[str, Any] , lowercase_ :Dict )-> str:
# convolution process
A__ = convs[0]
A__ = convs[1]
A__ = np.shape(lowercase_ )[0]
# get the data slice of original image data, data_focus
A__ = []
for i_focus in range(0 , size_data - size_conv + 1 , lowercase_ ):
for j_focus in range(0 , size_data - size_conv + 1 , lowercase_ ):
A__ = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(lowercase_ )
# calculate the feature map of every single kernel, and saved as list of matrix
A__ = []
A__ = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(lowercase_ ):
A__ = []
for i_focus in range(len(lowercase_ ) ):
A__ = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(lowercase_ ) )
A__ = np.asmatrix(lowercase_ ).reshape(
lowercase_ , lowercase_ )
data_featuremap.append(lowercase_ )
        # expanding the data slice to one dimension
A__ = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(lowercase_ ) )
A__ = np.asarray(lowercase_ )
return focus_list, data_featuremap
def UpperCAmelCase_ ( self :Optional[int] , lowercase_ :Optional[int] , lowercase_ :Union[str, Any] , lowercase_ :Optional[Any]="average_pool" )-> Dict:
# pooling process
A__ = len(featuremaps[0] )
A__ = int(size_map / size_pooling )
A__ = []
for i_map in range(len(lowercase_ ) ):
A__ = featuremaps[i_map]
A__ = []
for i_focus in range(0 , lowercase_ , lowercase_ ):
for j_focus in range(0 , lowercase_ , lowercase_ ):
A__ = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(lowercase_ ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(lowercase_ ) )
A__ = np.asmatrix(lowercase_ ).reshape(lowercase_ , lowercase_ )
featuremap_pooled.append(lowercase_ )
return featuremap_pooled
def UpperCAmelCase_ ( self :str , lowercase_ :Any )-> List[Any]:
# expanding three dimension data to one dimension list
A__ = []
for i in range(len(lowercase_ ) ):
A__ = np.shape(data[i] )
A__ = data[i].reshape(1 , shapes[0] * shapes[1] )
A__ = data_listed.getA().tolist()[0]
data_expanded.extend(lowercase_ )
A__ = np.asarray(lowercase_ )
return data_expanded
def UpperCAmelCase_ ( self :int , lowercase_ :Optional[Any] )-> Dict:
# expanding matrix to one dimension list
A__ = np.asarray(lowercase_ )
A__ = np.shape(lowercase_ )
A__ = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def UpperCAmelCase_ ( self :Union[str, Any] , lowercase_ :List[str] , lowercase_ :Any , lowercase_ :str , lowercase_ :Any , lowercase_ :Union[str, Any] )-> Any:
A__ = []
A__ = 0
for i_map in range(lowercase_ ):
A__ = np.ones((size_map, size_map) )
for i in range(0 , lowercase_ , lowercase_ ):
for j in range(0 , lowercase_ , lowercase_ ):
A__ = pd_pool[
i_pool
]
A__ = i_pool + 1
A__ = np.multiply(
lowercase_ , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(lowercase_ )
return pd_all
def UpperCAmelCase_ ( self :List[str] , lowercase_ :Optional[Any] , lowercase_ :Optional[Any] , lowercase_ :Union[str, Any] , lowercase_ :List[Any] , lowercase_ :Union[str, Any] , lowercase_ :str=bool )-> Tuple:
        # model training
print("----------------------Start Training-------------------------" )
print((" - - Shape: Train_Data ", np.shape(lowercase_ )) )
print((" - - Shape: Teach_Data ", np.shape(lowercase_ )) )
A__ = 0
A__ = []
A__ = 1_00_00
while rp < n_repeat and mse >= error_accuracy:
A__ = 0
print(F"-------------Learning Time {rp}--------------" )
for p in range(len(lowercase_ ) ):
# print('------------Learning Image: %d--------------'%p)
A__ = np.asmatrix(datas_train[p] )
A__ = np.asarray(datas_teach[p] )
A__, A__ = self.convolute(
lowercase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
A__ = self.pooling(lowercase_ , self.size_poolinga )
A__ = np.shape(lowercase_ )
A__ = self._expand(lowercase_ )
A__ = data_bp_input
A__ = np.dot(lowercase_ , self.vji.T ) - self.thre_bpa
A__ = self.sig(lowercase_ )
A__ = np.dot(lowercase_ , self.wkj.T ) - self.thre_bpa
A__ = self.sig(lowercase_ )
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
A__ = np.multiply(
(data_teach - bp_outa) , np.multiply(lowercase_ , (1 - bp_outa) ) )
A__ = np.multiply(
np.dot(lowercase_ , self.wkj ) , np.multiply(lowercase_ , (1 - bp_outa) ) )
A__ = np.dot(lowercase_ , self.vji )
A__ = pd_i_all / (self.size_poolinga * self.size_poolinga)
A__ = pd_conva_pooled.T.getA().tolist()
A__ = self._calculate_gradient_from_pool(
lowercase_ , lowercase_ , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
A__ = self._expand_mat(pd_conva_all[k_conv] )
A__ = self.rate_weight * np.dot(lowercase_ , lowercase_ )
A__ = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
A__ = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
A__ = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
A__ = self.vji + pd_j_all.T * bp_outa * self.rate_weight
A__ = self.thre_bpa - pd_k_all * self.rate_thre
A__ = self.thre_bpa - pd_j_all * self.rate_thre
# calculate the sum error of all single image
A__ = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
A__ = rp + 1
A__ = error_count / patterns
all_mse.append(lowercase_ )
def draw_error():
A__ = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(lowercase_ , "+-" )
plt.plot(lowercase_ , "r--" )
plt.xlabel("Learning Times" )
plt.ylabel("All_mse" )
plt.grid(lowercase_ , alpha=0.5 )
plt.show()
        print("------------------Training Complete---------------------" )
print((" - - Training epoch: ", rp, F" - - Mse: {mse:.6f}") )
if draw_e:
draw_error()
return mse
def UpperCAmelCase_ ( self :Dict , lowercase_ :List[Any] )-> Optional[Any]:
# model predict
A__ = []
print("-------------------Start Testing-------------------------" )
print((" - - Shape: Test_Data ", np.shape(lowercase_ )) )
for p in range(len(lowercase_ ) ):
A__ = np.asmatrix(datas_test[p] )
A__, A__ = self.convolute(
lowercase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
A__ = self.pooling(lowercase_ , self.size_poolinga )
A__ = self._expand(lowercase_ )
A__ = data_bp_input
A__ = bp_outa * self.vji.T - self.thre_bpa
A__ = self.sig(lowercase_ )
A__ = bp_outa * self.wkj.T - self.thre_bpa
A__ = self.sig(lowercase_ )
produce_out.extend(bp_outa.getA().tolist() )
A__ = [list(map(self.do_round , lowercase_ ) ) for each in produce_out]
return np.asarray(lowercase_ )
def UpperCAmelCase_ ( self :Dict , lowercase_ :Optional[int] )-> List[str]:
# return the data of image after convoluting process so we can check it out
A__ = np.asmatrix(lowercase_ )
A__, A__ = self.convolute(
lowercase_ , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
A__ = self.pooling(lowercase_ , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 440 | 0 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    """Check whether a graph (adjacency-list dict) is bipartite via DFS 2-coloring."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
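# Two-coloring intuition (hedged note): dfs alternates colors 0 and 1 along
# edges. The graph above is a 4-cycle plus an isolated vertex, so this prints
# True; an odd cycle such as {0: [1, 2], 1: [0, 2], 2: [0, 1]} would return False.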
| 654 | '''simple docstring'''
def counting_sort(collection):
    """Sort a collection of integers using the counting sort algorithm."""
    # if the collection is empty, returns empty
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how much a number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    """Sort the characters of a string via counting sort on their code points."""
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
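    # Complexity note with a tiny example (hedged): counting sort runs in
    # O(n + k) time where k == max - min + 1, so it suits dense integer
    # ranges; counting_sort([0, 5, 3, 2, 2]) -> [0, 2, 2, 3, 5].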
| 654 | 1 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class a ( UpperCAmelCase__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase :Optional[int] = CanineTokenizer
lowerCamelCase :Tuple = False
def UpperCAmelCase ( self ) -> int:
super().setUp()
_A = CanineTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def UpperCAmelCase ( self ) -> Union[str, Any]:
return CanineTokenizer.from_pretrained("""google/canine-s""" )
def UpperCAmelCase ( self , **lowerCAmelCase_ ) -> List[Any]:
_A = self.tokenizer_class.from_pretrained(self.tmpdirname , **_a )
_A = 10_24
return tokenizer
@require_torch
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = self.canine_tokenizer
_A = ["""Life is like a box of chocolates.""", """You never know what you're gonna get."""]
# fmt: off
_A = [5_73_44, 76, 1_05, 1_02, 1_01, 32, 1_05, 1_15, 32, 1_08, 1_05, 1_07, 1_01, 32, 97, 32, 98, 1_11, 1_20, 32, 1_11, 1_02, 32, 99, 1_04, 1_11, 99, 1_11, 1_08, 97, 1_16, 1_01, 1_15, 46, 5_73_45, 0, 0, 0, 0]
# fmt: on
_A = tokenizer(_a , padding=_a , return_tensors="""pt""" )
self.assertIsInstance(_a , _a )
_A = list(batch.input_ids.numpy()[0] )
self.assertListEqual(_a , _a )
self.assertEqual((2, 39) , batch.input_ids.shape )
self.assertEqual((2, 39) , batch.attention_mask.shape )
@require_torch
def UpperCAmelCase ( self ) -> Optional[int]:
_A = self.canine_tokenizer
_A = ["""Once there was a man.""", """He wrote a test in HuggingFace Tranformers."""]
_A = tokenizer(_a , padding=_a , return_tensors="""pt""" )
# check if input_ids, attention_mask and token_type_ids are returned
self.assertIn("""input_ids""" , _a )
self.assertIn("""attention_mask""" , _a )
self.assertIn("""token_type_ids""" , _a )
@require_torch
def UpperCAmelCase ( self ) -> List[Any]:
_A = self.canine_tokenizer
_A = [
"""What's the weater?""",
"""It's about 25 degrees.""",
]
_A = tokenizer(
text_target=_a , max_length=32 , padding="""max_length""" , truncation=_a , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
def UpperCAmelCase ( self ) -> int:
_A = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
_A = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_A = tempfile.mkdtemp()
_A = """ He is very happy, UNwant\u00E9d,running"""
_A = tokenizer.encode(_a , add_special_tokens=_a )
tokenizer.save_pretrained(_a )
_A = tokenizer.__class__.from_pretrained(_a )
_A = after_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
shutil.rmtree(_a )
_A = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
_A = tempfile.mkdtemp()
_A = """ He is very happy, UNwant\u00E9d,running"""
_A = tokenizer.additional_special_tokens
# We can add a new special token for Canine as follows:
_A = chr(0xe_007 )
additional_special_tokens.append(_a )
tokenizer.add_special_tokens({"""additional_special_tokens""": additional_special_tokens} )
_A = tokenizer.encode(_a , add_special_tokens=_a )
tokenizer.save_pretrained(_a )
_A = tokenizer.__class__.from_pretrained(_a )
_A = after_tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
self.assertIn(_a , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
_A = tokenizer.__class__.from_pretrained(_a , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_a )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A , _A = self.get_clean_sequence(_a )
# a special token for Canine can be defined as follows:
_A = 0xe_005
_A = chr(_a )
tokenizer.add_special_tokens({"""cls_token""": special_token} )
_A = tokenizer.encode(_a , add_special_tokens=_a )
self.assertEqual(len(_a ) , 1 )
_A = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=_a )
_A = tokenizer.encode(_a , add_special_tokens=_a )
_A = tokenizer.encode(_a , add_special_tokens=_a )
_A = tokenizer.encode(_a , add_special_tokens=_a )
self.assertEqual(_a , input_encoded + special_token_id )
_A = tokenizer.decode(_a , skip_special_tokens=_a )
self.assertTrue(special_token not in decoded )
def UpperCAmelCase ( self ) -> Union[str, Any]:
_A = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A = chr(0xe_005 )
_A = chr(0xe_006 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=_a )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({"""additional_special_tokens""": [SPECIAL_TOKEN_2]} )
_A = tokenizer.tokenize(_a )
_A = tokenizer.tokenize(_a )
self.assertEqual(len(_a ) , 1 )
self.assertEqual(len(_a ) , 1 )
self.assertEqual(token_a[0] , _a )
self.assertEqual(token_a[0] , _a )
@require_tokenizers
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
_A = 0xe_006
_A = chr(_a )
_A = AddedToken(_a , lstrip=_a )
tokenizer.add_special_tokens({"""additional_special_tokens""": [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(_a )
tokenizer.from_pretrained(_a )
def UpperCAmelCase ( self ) -> Optional[Any]:
_A = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_a )
with open(os.path.join(_a , """special_tokens_map.json""" ) , encoding="""utf-8""" ) as json_file:
_A = json.load(_a )
with open(os.path.join(_a , """tokenizer_config.json""" ) , encoding="""utf-8""" ) as json_file:
_A = json.load(_a )
# a special token for Canine can be defined as follows:
_A = 0xe_006
_A = chr(_a )
_A = [new_token_a]
_A = [new_token_a]
with open(os.path.join(_a , """special_tokens_map.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(_a , _a )
with open(os.path.join(_a , """tokenizer_config.json""" ) , """w""" , encoding="""utf-8""" ) as outfile:
json.dump(_a , _a )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
_A = tokenizer_class.from_pretrained(_a , extra_ids=0 )
self.assertIn(_a , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
_A = 0xe_007
_A = chr(_a )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
_A = [AddedToken(_a , lstrip=_a )]
_A = tokenizer_class.from_pretrained(
_a , additional_special_tokens=_a , extra_ids=0 )
self.assertIn(_a , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def UpperCAmelCase ( self ) -> Tuple:
_A = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A = """hello world"""
if self.space_between_special_tokens:
_A = """[CLS] hello world [SEP]"""
else:
_A = input
_A = tokenizer.encode(_a , add_special_tokens=_a )
_A = tokenizer.decode(_a , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(_a , [output, output.lower()] )
def UpperCAmelCase ( self ) -> str:
_A = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
_A = [
"""bos_token""",
"""eos_token""",
"""unk_token""",
"""sep_token""",
"""pad_token""",
"""cls_token""",
"""mask_token""",
]
_A = """a"""
_A = ord(_a )
for attr in attributes_list:
setattr(_a , attr + """_id""" , _a )
self.assertEqual(getattr(_a , _a ) , _a )
self.assertEqual(getattr(_a , attr + """_id""" ) , _a )
setattr(_a , attr + """_id""" , _a )
self.assertEqual(getattr(_a , _a ) , _a )
self.assertEqual(getattr(_a , attr + """_id""" ) , _a )
setattr(_a , """additional_special_tokens_ids""" , [] )
self.assertListEqual(getattr(_a , """additional_special_tokens""" ) , [] )
self.assertListEqual(getattr(_a , """additional_special_tokens_ids""" ) , [] )
_A = 0xe_006
_A = chr(_a )
setattr(_a , """additional_special_tokens_ids""" , [additional_special_token_id] )
self.assertListEqual(getattr(_a , """additional_special_tokens""" ) , [additional_special_token] )
self.assertListEqual(getattr(_a , """additional_special_tokens_ids""" ) , [additional_special_token_id] )
def UpperCAmelCase ( self ) -> List[str]:
pass
def UpperCAmelCase ( self ) -> Any:
pass
def UpperCAmelCase ( self ) -> Dict:
pass
def UpperCAmelCase ( self ) -> str:
pass
def UpperCAmelCase ( self ) -> List[str]:
pass
def UpperCAmelCase ( self ) -> Dict:
pass
def UpperCAmelCase ( self ) -> Dict:
pass
def UpperCAmelCase ( self ) -> List[Any]:
pass
| 401 |
"""simple docstring"""
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
_unmatched = object()
# For specifying empty leaf dict `{}`
empty_dict = object()
def _match(qs, ks):
    """Return True if regexes in qs match any window of strings in tuple ks."""
    # compile regexes and force complete match
    qts = tuple(re.compile(x + "$") for x in qs)
    # walk through every window of ks and try to match the whole pattern tuple
    for i in range(len(ks) - len(qs) + 1):
        matches = [x.match(y) for x, y in zip(qts, ks[i:])]
        if matches and all(matches):
            return True
    return False


def _replacement_rules(rules):
    def replace(key, val):
        for rule, replacement in rules:
            if _match(rule, key):
                return replacement
        return val

    return replace


def _get_partition_rules():
    # Note: None on an axis means "replicated along that axis"; the None
    # placements below are reconstructed from the standard GPT-style layout.
    return [
        # embeddings
        (("transformer", "wpe", "embedding"), P("mp", None)),
        (("transformer", "wte", "embedding"), P("mp", None)),
        # attention
        (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(None, "mp")),
        (("attention", "out_proj", "kernel"), P("mp", None)),
        (("attention", "out_proj", "bias"), None),
        # mlp
        (("mlp", "c_fc", "kernel"), P(None, "mp")),
        (("mlp", "c_fc", "bias"), P("mp")),
        (("mlp", "c_proj", "kernel"), P("mp", None)),
        (("mlp", "c_proj", "bias"), None),
        # layer norms
        ((r"ln_\d+", "bias"), None),
        ((r"\d+", r"ln_\d+", "scale"), None),
        (("ln_f", "bias"), None),
        (("ln_f", "scale"), None),
    ]


def set_partitions(in_dict):
    rules = _get_partition_rules()
    replace = _replacement_rules(rules)

    initd = {k: _unmatched for k in flatten_dict(in_dict)}

    result = {k: replace(k, v) for k, v in initd.items()}

    assert _unmatched not in result.values(), "Incomplete partition spec."

    return freeze(unflatten_dict(result))
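# Matching sketch (hedged): rules apply to flattened parameter paths, so a key
# like ("transformer", "h", "0", "attention", "q_proj", "kernel") picks up
# P(None, "mp"), sharding the projection's output dimension across the "mp"
# mesh axis; any key no rule matches keeps the _unmatched sentinel and trips
# the assert inside set_partitions.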
| 543 | 0 |
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare a library version (or an already-parsed Version) to a required version string."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}')
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Compare the currently installed torch version to a reference version string."""
    return compare_versions(torch_version, operation, version)
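# Usage sketch (hedged; the operator strings come from STR_OPERATION_TO_FUNC):
# if is_torch_version(">=", "1.12.0"):
#     ...  # safe to rely on features introduced in torch 1.12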
| 25 |
def gnome_sort(lst: list) -> list:
    if len(lst) <= 1:
        return lst

    i = 1

    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
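    # Walkthrough (hedged): on [3, 1, 2] the gnome swaps and steps back,
    # giving [1, 3, 2] then [1, 2, 3]; worst case O(n^2), but an already
    # sorted list is verified in O(n) since the cursor only moves forward.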
| 25 | 1 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_vivit': ['VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VivitConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_vivit'] = ['VivitImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vivit'] = [
'VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'VivitModel',
'VivitPreTrainedModel',
'VivitForVideoClassification',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
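    # Note on the lazy-import pattern (hedged): swapping the module object in
    # sys.modules for a _LazyModule defers the heavy torch/vision imports until
    # an attribute is first touched, e.g.
    # `from transformers.models.vivit import VivitModel` only then loads torch.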
| 49 |
"""simple docstring"""
import json
import os
import unittest
from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
_A : str = LEDTokenizer
_A : List[Any] = LEDTokenizerFast
_A : Dict = True
def lowerCamelCase(self ):
super().setUp()
A_ : Dict = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
A_ : Any = dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
A_ : List[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
A_ : List[str] = {"""unk_token""": """<unk>"""}
A_ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A_ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCAmelCase_ ) )
def lowerCamelCase(self , **lowerCAmelCase_ ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowerCamelCase(self , **lowerCAmelCase_ ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowerCamelCase(self , lowerCAmelCase_ ):
return "lower newer", "lower newer"
@cached_property
def lowerCamelCase(self ):
return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" )
@cached_property
def lowerCamelCase(self ):
return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" )
@require_torch
def lowerCamelCase(self ):
A_ : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
A_ : Any = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
A_ : Optional[int] = tokenizer(lowerCAmelCase_ , max_length=len(lowerCAmelCase_ ) , padding=lowerCAmelCase_ , return_tensors="""pt""" )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
A_ : int = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
@require_torch
def lowerCamelCase(self ):
A_ : Union[str, Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
A_ : Tuple = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ , return_tensors="""pt""" )
self.assertIn("""input_ids""" , lowerCAmelCase_ )
self.assertIn("""attention_mask""" , lowerCAmelCase_ )
self.assertNotIn("""labels""" , lowerCAmelCase_ )
self.assertNotIn("""decoder_attention_mask""" , lowerCAmelCase_ )
@require_torch
def lowerCamelCase(self ):
A_ : Tuple = [
"""Summary of the text.""",
"""Another summary.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
A_ : Optional[int] = tokenizer(text_target=lowerCAmelCase_ , max_length=32 , padding="""max_length""" , return_tensors="""pt""" )
self.assertEqual(32 , targets["""input_ids"""].shape[1] )
@require_torch
def lowerCamelCase(self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
A_ : Optional[Any] = tokenizer(
["""I am a small frog""" * 1024, """I am a small frog"""] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , return_tensors="""pt""" )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual(batch.input_ids.shape , (2, 5122) )
@require_torch
def lowerCamelCase(self ):
A_ : Dict = ["""A long paragraph for summarization."""]
A_ : Any = [
"""Summary of the text.""",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
A_ : int = tokenizer(lowerCAmelCase_ , return_tensors="""pt""" )
A_ : Optional[int] = tokenizer(text_target=lowerCAmelCase_ , return_tensors="""pt""" )
A_ : str = inputs["""input_ids"""]
A_ : Tuple = targets["""input_ids"""]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
@require_torch
def lowerCamelCase(self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
A_ : str = ["""Summary of the text.""", """Another summary."""]
A_ : Dict = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
A_ : Optional[Any] = tokenizer(lowerCAmelCase_ , padding=lowerCAmelCase_ )
A_ : Any = [[0] * len(lowerCAmelCase_ ) for x in encoded_output["""input_ids"""]]
A_ : Tuple = tokenizer.pad(lowerCAmelCase_ )
self.assertSequenceEqual(outputs["""global_attention_mask"""] , lowerCAmelCase_ )
def lowerCamelCase(self ):
pass
def lowerCamelCase(self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
A_ : Any = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
A_ : Optional[Any] = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
A_ : Optional[Any] = """A, <mask> AllenNLP sentence."""
A_ : List[Any] = tokenizer_r.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
A_ : Any = tokenizer_p.encode_plus(lowerCAmelCase_ , add_special_tokens=lowerCAmelCase_ , return_token_type_ids=lowerCAmelCase_ )
self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) )
self.assertEqual(
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , )
A_ : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] )
A_ : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] )
self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
self.assertSequenceEqual(
lowerCAmelCase_ , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
| 180 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'],
'tokenization_electra': ['ElectraTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_electra_fast'] = ['ElectraTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_electra'] = [
'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'ElectraForCausalLM',
'ElectraForMaskedLM',
'ElectraForMultipleChoice',
'ElectraForPreTraining',
'ElectraForQuestionAnswering',
'ElectraForSequenceClassification',
'ElectraForTokenClassification',
'ElectraModel',
'ElectraPreTrainedModel',
'load_tf_weights_in_electra',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_electra'] = [
'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFElectraForMaskedLM',
'TFElectraForMultipleChoice',
'TFElectraForPreTraining',
'TFElectraForQuestionAnswering',
'TFElectraForSequenceClassification',
'TFElectraForTokenClassification',
'TFElectraModel',
'TFElectraPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_electra'] = [
'FlaxElectraForCausalLM',
'FlaxElectraForMaskedLM',
'FlaxElectraForMultipleChoice',
'FlaxElectraForPreTraining',
'FlaxElectraForQuestionAnswering',
'FlaxElectraForSequenceClassification',
'FlaxElectraForTokenClassification',
'FlaxElectraModel',
'FlaxElectraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 593 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_autoformer': [
'AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'AutoformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_autoformer'] = [
'AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'AutoformerForPrediction',
'AutoformerModel',
'AutoformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 593 | 1 |
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeq2SeqDataset, Seq2SeqDataset
__SCREAMING_SNAKE_CASE ="""bert-base-cased"""
__SCREAMING_SNAKE_CASE ="""google/pegasus-xsum"""
__SCREAMING_SNAKE_CASE =[""" Sam ate lunch today.""", """Sams lunch ingredients."""]
__SCREAMING_SNAKE_CASE =["""A very interesting story about what I ate for lunch.""", """Avocado, celery, turkey, coffee"""]
__SCREAMING_SNAKE_CASE ="""patrickvonplaten/t5-tiny-random"""
__SCREAMING_SNAKE_CASE ="""sshleifer/bart-tiny-random"""
__SCREAMING_SNAKE_CASE ="""sshleifer/tiny-mbart"""
__SCREAMING_SNAKE_CASE ="""sshleifer/tiny-marian-en-de"""
def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


def make_test_data_dir(tmp_dir):
    for split in ["train", "val", "test"]:
        _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES)
        _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES)
    return tmp_dir
class TestAll(TestCasePlus):
    """Tests for Seq2SeqDataset, LegacySeq2SeqDataset and the associated samplers."""
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
    def test_seq2seq_dataset_truncation(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        max_src_len = 4
        max_tgt_len = 8
        assert max_len_target > max_src_len  # Will be truncated
        assert max_len_source > max_src_len  # Will be truncated
        src_lang, tgt_lang = "ro_RO", "de_DE"  # ignored for all but mbart, but never causes error.
        train_dataset = Seq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=max_src_len,
            max_target_length=max_tgt_len,
            src_lang=src_lang,
            tgt_lang=tgt_lang,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert isinstance(batch, dict)
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_src_len
            # show that targets are the same len
            assert batch["labels"].shape[1] == max_tgt_len
            if tok_name != MBART_TINY:
                continue
            # check language codes in correct place
            batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], tokenizer.pad_token_id)
            assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
            assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
            assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
            break  # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
    def test_legacy_dataset_truncation(self, tok):
        tokenizer = AutoTokenizer.from_pretrained(tok)
        tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())
        max_len_source = max(len(tokenizer.encode(a)) for a in ARTICLES)
        max_len_target = max(len(tokenizer.encode(a)) for a in SUMMARIES)
        trunc_target = 4
        train_dataset = LegacySeq2SeqDataset(
            tokenizer,
            data_dir=tmp_dir,
            type_path="train",
            max_source_length=20,
            max_target_length=trunc_target,
        )
        dataloader = DataLoader(train_dataset, batch_size=2, collate_fn=train_dataset.collate_fn)
        for batch in dataloader:
            assert batch["attention_mask"].shape == batch["input_ids"].shape
            # show that articles were trimmed.
            assert batch["input_ids"].shape[1] == max_len_source
            assert 20 >= batch["input_ids"].shape[1]  # trimmed significantly
            # show that targets were truncated
            assert batch["labels"].shape[1] == trunc_target  # Truncated
            assert max_len_target > trunc_target  # Truncated
            break  # No need to test every batch
    def test_pack_dataset(self):
        tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")

        tmp_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        orig_examples = tmp_dir.joinpath("train.source").open().readlines()
        save_dir = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()))
        pack_data_dir(tokenizer, tmp_dir, 128, save_dir)
        orig_paths = {x.name for x in tmp_dir.iterdir()}
        new_paths = {x.name for x in save_dir.iterdir()}
        packed_examples = save_dir.joinpath("train.source").open().readlines()
        # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
        # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
        assert len(packed_examples) < len(orig_examples)
        assert len(packed_examples) == 1
        assert len(packed_examples[0]) == sum(len(x) for x in orig_examples)
        assert orig_paths == new_paths
    @pytest.mark.skipif(not FAIRSEQ_AVAILABLE, reason="This test requires fairseq")
    def test_dynamic_batch_size(self):
        if not FAIRSEQ_AVAILABLE:
            return
        ds, max_tokens, tokenizer = self._get_dataset(max_len=64)
        required_batch_size_multiple = 64
        batch_sampler = ds.make_dynamic_sampler(max_tokens, required_batch_size_multiple=required_batch_size_multiple)
        batch_sizes = [len(x) for x in batch_sampler]
        assert len(set(batch_sizes)) > 1  # it's not dynamic batch size if every batch is the same length
        assert sum(batch_sizes) == len(ds)  # no dropped or added examples
        data_loader = DataLoader(ds, batch_sampler=batch_sampler, collate_fn=ds.collate_fn, num_workers=2)
        failures = []
        num_src_per_batch = []
        for batch in data_loader:
            src_shape = batch["input_ids"].shape
            bs = src_shape[0]
            assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
            num_src_tokens = np.product(batch["input_ids"].shape)
            num_src_per_batch.append(num_src_tokens)
            if num_src_tokens > (max_tokens * 1.1):
                failures.append(num_src_tokens)
        assert num_src_per_batch[0] == max(num_src_per_batch)
        if failures:
            raise AssertionError(f"too many tokens in {len(failures)} batches")
    def test_sortish_sampler_reduces_padding(self):
        ds, _, tokenizer = self._get_dataset(max_len=512)
        bs = 2
        sortish_sampler = ds.make_sortish_sampler(bs, shuffle=False)

        naive_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2)
        sortish_dl = DataLoader(ds, batch_size=bs, collate_fn=ds.collate_fn, num_workers=2, sampler=sortish_sampler)

        pad = tokenizer.pad_token_id

        def count_pad_tokens(data_loader, k="input_ids"):
            return [batch[k].eq(pad).sum().item() for batch in data_loader]

        assert sum(count_pad_tokens(sortish_dl, k="labels")) < sum(count_pad_tokens(naive_dl, k="labels"))
        assert sum(count_pad_tokens(sortish_dl)) < sum(count_pad_tokens(naive_dl))
        assert len(sortish_dl) == len(naive_dl)
    def _get_dataset(self, n_obs=1000, max_len=128):
        if os.getenv("USE_REAL_DATA", False):
            data_dir = "examples/seq2seq/wmt_en_ro"
            max_tokens = max_len * 2 * 64
            if not Path(data_dir).joinpath("train.len").exists():
                save_len_file(MARIAN_TINY, data_dir)
        else:
            data_dir = "examples/seq2seq/test_data/wmt_en_ro"
            max_tokens = max_len * 4
            save_len_file(MARIAN_TINY, data_dir)
        tokenizer = AutoTokenizer.from_pretrained(MARIAN_TINY)
        ds = Seq2SeqDataset(
            tokenizer,
            data_dir=data_dir,
            type_path="train",
            max_source_length=max_len,
            max_target_length=max_len,
            n_obs=n_obs,
        )
        return ds, max_tokens, tokenizer
    def test_distributed_sortish_sampler_splits_indices_between_procs(self):
        ds, max_tokens, tokenizer = self._get_dataset()
        ids1 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=0, add_extra_examples=False))
        ids2 = set(DistributedSortishSampler(ds, 256, num_replicas=2, rank=1, add_extra_examples=False))
        assert ids1.intersection(ids2) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
    def test_dataset_kwargs(self, tok_name):
        tokenizer = AutoTokenizer.from_pretrained(tok_name, use_fast=False)
        if tok_name == MBART_TINY:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
                src_lang="EN",
                tgt_lang="FR",
            )
            kwargs = train_dataset.dataset_kwargs
            assert "src_lang" in kwargs and "tgt_lang" in kwargs
        else:
            train_dataset = Seq2SeqDataset(
                tokenizer,
                data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()),
                type_path="train",
                max_source_length=4,
                max_target_length=8,
            )
            kwargs = train_dataset.dataset_kwargs
            assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
            assert len(kwargs) == 1 if tok_name == BART_TINY else len(kwargs) == 0
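# Editor's note: a minimal, self-contained sketch of the idea the sortish-sampler test above
# verifies -- grouping examples of similar length before batching so that each batch needs less
# padding. This is an illustration only, not the Seq2SeqDataset implementation; `sortish_indices`
# and its parameters are hypothetical names.
def sortish_indices(lengths: list, batch_size: int) -> list:
    # sort within coarse chunks, so batches are length-homogeneous without fully sorting the epoch
    chunk = batch_size * 50
    indices = list(range(len(lengths)))
    return [
        i
        for start in range(0, len(indices), chunk)
        for i in sorted(indices[start : start + chunk], key=lambda j: lengths[j], reverse=True)
    ]


# Example: lengths [3, 9, 4, 8] with batch_size=2 -> order [1, 3, 2, 0],
# so the 9/8-token examples batch together and the 4/3-token examples batch together.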
| 234 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    """Configuration class for the Informer time-series transformer model."""

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = "mean",
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = "gelu",
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache=True,
        # Informer arguments
        attention_type: str = "prob",
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples
        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache
        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
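# Editor's note: a quick sanity check of the derived attributes (assuming a transformers
# version with Informer support is installed; the defaults are the ones defined above):
#
#     from transformers import InformerConfig
#
#     config = InformerConfig(prediction_length=24)
#     print(config.context_length)   # falls back to prediction_length -> 24
#     print(config.lags_sequence)    # default [1, 2, 3, 4, 5, 6, 7]
#     print(config.feature_size)     # input_size * len(lags_sequence) + _number_of_features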
| 234 | 1 |
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineLegacyNightlyTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inference(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy"
        )

        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=15,
            generator=generator,
            output_type="np",
        )

        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-2
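# Editor's note: the same pipeline can usually be exercised without a GPU by requesting the CPU
# execution provider instead (much slower, but handy for smoke tests). Treat this as a sketch,
# not part of the original test:
#
#     pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider"
#     )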
| 707 |
from __future__ import annotations
def generate_all_permutations(sequence: list) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list,
    current_sequence: list,
    index: int,
    index_used: list,
) -> None:
    """Iterate through each branch of the state space tree with DFS, printing every permutation."""
    if index == len(sequence):
        print(current_sequence)
        return
    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            # backtrack: undo the choice before trying the next element
            current_sequence.pop()
            index_used[i] = False
sequence = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2 = ["A", "B", "C"]
generate_all_permutations(sequence_2)
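# Editor's note: the recursive backtracking above can be cross-checked against the standard
# library; itertools.permutations yields the same n! orderings (as tuples):
import itertools

expected = [list(p) for p in itertools.permutations(sequence)]
print(f"itertools agrees: {len(expected)} permutations of {sequence}")  # 4! = 24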
| 689 | 0 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("env")
    else:
        parser = argparse.ArgumentParser("Accelerate env command")

    parser.add_argument(
        "--config_file", default=None, help="The config file to use for the default values in the launching script."
    )

    if subparsers is not None:
        parser.set_defaults(func=env_command)
    return parser
def env_command(args):
    pt_version = torch.__version__
    pt_cuda_available = torch.cuda.is_available()
    pt_xpu_available = is_xpu_available()
    pt_npu_available = is_npu_available()

    accelerate_config = "Not found"
    # Get the default from the config file.
    if args.config_file is not None or os.path.isfile(default_config_file):
        accelerate_config = load_config_from_file(args.config_file).to_dict()

    info = {
        "`Accelerate` version": version,
        "Platform": platform.platform(),
        "Python version": platform.python_version(),
        "Numpy version": np.__version__,
        "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
        "PyTorch XPU available": str(pt_xpu_available),
        "PyTorch NPU available": str(pt_npu_available),
        "System RAM": f"{psutil.virtual_memory().total / 1024 ** 3:.2f} GB",
    }
    if pt_cuda_available:
        info["GPU type"] = torch.cuda.get_device_name()

    print("\nCopy-and-paste the text below in your GitHub issue\n")
    print("\n".join([f"- {prop}: {val}" for prop, val in info.items()]))

    print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:")
    accelerate_config_str = (
        "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
        if isinstance(accelerate_config, dict)
        else f"\t{accelerate_config}"
    )
    print(accelerate_config_str)

    info["`Accelerate` configs"] = accelerate_config

    return info
def main():
    parser = env_command_parser()
    args = parser.parse_args()
    env_command(args)
    return 0
if __name__ == "__main__":
raise SystemExit(main())
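# Editor's note: in a normal install this module is reached through the CLI entry point,
# i.e. `accelerate env`. Calling it programmatically is a small sketch on top of the
# functions above:
#
#     from argparse import Namespace
#
#     info = env_command(Namespace(config_file=None))
#     print(sorted(info.keys()))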
| 96 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)
    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]

        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)
    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length
        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
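# Editor's note: a short usage sketch for the `pad` method above, via Wav2Vec2FeatureExtractor
# (a concrete SequenceFeatureExtractor subclass in transformers). Ragged inputs come out
# right-padded with `padding_value`, plus an attention mask marking the real samples:
#
#     import numpy as np
#     from transformers import Wav2Vec2FeatureExtractor
#
#     extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
#     batch = {"input_values": [np.ones(3), np.ones(5)]}
#     padded = extractor.pad(batch, padding=True, return_tensors="np", return_attention_mask=True)
#     print(padded["input_values"].shape)   # (2, 5)
#     print(padded["attention_mask"])       # [[1 1 1 0 0], [1 1 1 1 1]]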
| 96 | 1 |
import inspect
import unittest
import torch
import torch.nn as nn
from accelerate.hooks import (
AlignDevicesHook,
ModelHook,
SequentialHook,
add_hook_to_module,
attach_align_device_hook,
remove_hook_from_module,
remove_hook_from_submodules,
)
from accelerate.test_utils import require_multi_gpu
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class PreForwardHook(ModelHook):
    def pre_forward(self, module, *args, **kwargs):
        return (args[0] + 1,) + args[1:], kwargs


class PostForwardHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1
class HooksModelTester(unittest.TestCase):
    def test_add_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        self.assertEqual(test_model._hf_hook, test_hook)
        self.assertTrue(hasattr(test_model, "_old_forward"))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_append_and_remove_hooks(self):
        test_model = ModelForTest()
        test_hook = ModelHook()
        add_hook_to_module(test_model, test_hook)
        add_hook_to_module(test_model, test_hook, append=True)
        self.assertEqual(isinstance(test_model._hf_hook, SequentialHook), True)
        self.assertEqual(len(test_model._hf_hook.hooks), 2)
        self.assertTrue(hasattr(test_model, "_old_forward"))
        # Check adding the hook did not change the name or the signature
        self.assertEqual(test_model.forward.__name__, "forward")
        self.assertListEqual(list(inspect.signature(test_model.forward).parameters), ["x"])
        remove_hook_from_module(test_model)
        self.assertFalse(hasattr(test_model, "_hf_hook"))
        self.assertFalse(hasattr(test_model, "_old_forward"))
    def test_pre_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        expected = test_model(x + 1)
        expected2 = test_model(x + 2)

        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PreForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, expected, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PreForwardHook(), PreForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, expected2, atol=1e-5)
    def test_post_forward_hook_is_executed(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # Attaching a hook to a model when it already has one replaces, does not chain
        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1, atol=1e-5))

        # You need to use the sequential hook to chain two or more hooks
        test_hook = SequentialHook(PostForwardHook(), PostForwardHook())
        add_hook_to_module(test_model, test_hook)
        output2 = test_model(x)
        assert torch.allclose(output2, output + 2, atol=1e-5)
    def test_no_grad_in_hook(self):
        test_model = ModelForTest()
        x = torch.randn(2, 3)
        output = test_model(x)

        test_hook = PostForwardHook()
        add_hook_to_module(test_model, test_hook)
        output1 = test_model(x)
        self.assertTrue(torch.allclose(output1, output + 1))
        self.assertTrue(output1.requires_grad)

        test_hook.no_grad = True
        output1 = test_model(x)
        self.assertFalse(output1.requires_grad)
    @require_multi_gpu
    def test_align_devices_as_model_parallelism(self):
        model = ModelForTest()
        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        add_hook_to_module(model.linear1, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(execution_device=0))
        add_hook_to_module(model.linear2, AlignDevicesHook(execution_device=1))

        self.assertEqual(model.linear1.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.weight.device, torch.device(0))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device(0))
        self.assertEqual(model.linear2.weight.device, torch.device(1))

        # We can still make a forward pass. The input does not need to be on any particular device
        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, torch.device(1))

        # We can add a general hook to put back output on same device as input.
        add_hook_to_module(model, AlignDevicesHook(io_same_device=True))
        x = torch.randn(2, 3).to(0)
        output = model(x)
        self.assertEqual(output.device, torch.device(0))
    def test_align_devices_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        hook_kwargs = {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True}
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(hook_kwargs["execution_device"])
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        hook_kwargs = {
            "execution_device": 0 if torch.cuda.is_available() else "cpu",
            "offload": True,
            "offload_buffers": True,
        }
        add_hook_to_module(model.linear1, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.batchnorm, AlignDevicesHook(**hook_kwargs))
        add_hook_to_module(model.linear2, AlignDevicesHook(**hook_kwargs))

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_module(model.linear1)
        remove_hook_from_module(model.batchnorm)
        remove_hook_from_module(model.linear2)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(model, execution_device=execution_device, offload=True)

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(model, execution_device=execution_device, offload=True, offload_buffers=True)

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
    def test_attach_align_device_hook_as_cpu_offload_with_weight_map(self):
        model = ModelForTest()

        # Everything is on CPU
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # This will move each submodule on different devices
        execution_device = 0 if torch.cuda.is_available() else "cpu"
        attach_align_device_hook(
            model, execution_device=execution_device, offload=True, weights_map=model.state_dict()
        )

        # Parameters have been offloaded, so on the meta device
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        # Buffers are not included in the offload by default, so are on the execution device
        device = torch.device(execution_device)
        self.assertEqual(model.batchnorm.running_mean.device, device)

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))

        # Now test with buffers included in the offload
        attach_align_device_hook(
            model,
            execution_device=execution_device,
            offload=True,
            weights_map=model.state_dict(),
            offload_buffers=True,
        )

        # Parameters have been offloaded, so on the meta device, buffers included
        self.assertEqual(model.linear1.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("meta"))
        self.assertEqual(model.linear2.weight.device, torch.device("meta"))
        self.assertEqual(model.batchnorm.running_mean.device, torch.device("meta"))

        x = torch.randn(2, 3)
        output = model(x)
        self.assertEqual(output.device, device)

        # Removing hooks loads back the weights in the model.
        remove_hook_from_submodules(model)
        self.assertEqual(model.linear1.weight.device, torch.device("cpu"))
        self.assertEqual(model.batchnorm.weight.device, torch.device("cpu"))
        self.assertEqual(model.linear2.weight.device, torch.device("cpu"))
| 721 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
    """Configuration for training CodeParrot."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10_000, metadata={"help": "Size of buffer used to shuffle streaming dataset."}
    )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate for training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate scheduler type."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."}
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."}
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."}
    )
    max_train_steps: Optional[int] = field(default=50_000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1_024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1_024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."}
    )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})
@dataclass
class EvaluationArguments:
    """Configuration for evaluating a trained model."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1_024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
@dataclass
class HumanEvalArguments:
    """Configuration for running evaluation on the HumanEval benchmark."""

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(
        default=None,
        metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},
    )
    do_sample: Optional[bool] = field(
        default=True, metadata={"help": "Sample from the language model's output distribution."}
    )
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."})
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    n_samples: Optional[int] = field(
        default=200, metadata={"help": "Number of completions to generate for each sample."}
    )
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
    output_file: Optional[str] = field(
        default="eval_results.json", metadata={"help": "File where the evaluation results are saved."}
    )
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"}
    )
    device_int: Optional[int] = field(
        default=-1,
        metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        },
    )
@dataclass
class PreprocessingArguments:
    """Configuration for preprocessing the raw dataset."""

    num_workers: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        },
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."}
    )
    output_dir: Optional[str] = field(
        default="codeparrot-clean", metadata={"help": "Folder to save the processed dataset."}
    )
    samples_per_file: Optional[int] = field(
        default=100_000, metadata={"help": "Number of files to save per JSON output file."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    line_max: Optional[float] = field(
        default=1_000, metadata={"help": "Maximum line length in file, otherwise file is filtered."}
    )
    line_mean: Optional[float] = field(
        default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."}
    )
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."}
    )
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."}
    )
    filter_proba: Optional[float] = field(
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."}
    )
    tokenizer: Optional[str] = field(
        default="codeparrot/codeparrot",
        metadata={"help": "Name or path to the tokenizer."},
    )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={"help": "If True, near-duplicate samples are removed."}
    )
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."}
    )
@dataclass
class TokenizerTrainingArguments:
    """Configuration for training a new tokenizer."""

    base_tokenizer: Optional[str] = field(
        default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."}
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    n_examples: Optional[int] = field(
        default=200_000, metadata={"help": "Number of examples to train tokenizer on."}
    )
    vocab_size: Optional[int] = field(default=32_768, metadata={"help": "Vocabulary size of the new tokenizer."})
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})
@dataclass
class PretokenizationArguments:
    """Configuration for pretokenizing a dataset."""

    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."}
    )
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
@dataclass
class InitializationArguments:
    """Configuration for initializing a new model."""

    config_name: Optional[str] = field(
        default="gpt2-large", metadata={"help": "Configuration to use for model initialization."}
    )
    tokenizer_name: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."}
    )
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})
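# Editor's note: these dataclasses are meant to be consumed with transformers' HfArgumentParser;
# a minimal sketch of how a training script would parse them from the command line:
#
#     from transformers import HfArgumentParser
#
#     parser = HfArgumentParser(TrainingArguments)
#     (train_args,) = parser.parse_args_into_dataclasses()
#     print(train_args.model_ckpt, train_args.learning_rate)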
| 326 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_dpt_config(checkpoint_url):
    config = DPTConfig(embedding_type="hybrid")

    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.backbone_out_indices = [5, 11, 17, 23]
        config.neck_hidden_sizes = [256, 512, 1024, 1024]
        expected_shape = (1, 384, 384)

    # note: the original script had `if "nyu" or "midas" in checkpoint_url`, which is always truthy
    if "nyu" in checkpoint_url or "midas" in checkpoint_url:
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.neck_hidden_sizes = [256, 512, 768, 768]
        config.num_labels = 150
        config.patch_size = 16
        expected_shape = (1, 384, 384)
        config.use_batch_norm_in_fusion_residual = False
        config.readout_type = "project"

    if "ade" in checkpoint_url:
        config.use_batch_norm_in_fusion_residual = True
        config.hidden_size = 768
        config.reassemble_factors = [1, 1, 1, 0.5]
        config.num_labels = 150
        config.patch_size = 16
        repo_id = "huggingface/label-files"
        filename = "ade20k-id2label.json"
        id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        expected_shape = [1, 150, 480, 480]

    return config, expected_shape
def remove_ignore_keys_(state_dict):
    ignore_keys = ["pretrained.model.head.weight", "pretrained.model.head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
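# Editor's note: helpers like `rename_key` below are typically applied over every key of the
# original checkpoint to build the HF-format state dict. A hedged sketch of that driver loop
# (`convert_state_dict` is a hypothetical name, not part of the original script):
#
#     def convert_state_dict(state_dict):
#         remove_ignore_keys_(state_dict)
#         return {rename_key(key): value for key, value in state_dict.items()}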
def UpperCamelCase ( SCREAMING_SNAKE_CASE_ ) ->Any:
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
_lowerCamelCase : Tuple = name.replace('''pretrained.model''' , '''dpt.encoder''' )
if "pretrained.model" in name:
_lowerCamelCase : Optional[Any] = name.replace('''pretrained.model''' , '''dpt.embeddings''' )
if "patch_embed" in name:
_lowerCamelCase : Any = name.replace('''patch_embed''' , '''''' )
if "pos_embed" in name:
_lowerCamelCase : Optional[Any] = name.replace('''pos_embed''' , '''position_embeddings''' )
if "attn.proj" in name:
_lowerCamelCase : Optional[int] = name.replace('''attn.proj''' , '''attention.output.dense''' )
if "proj" in name and "project" not in name:
_lowerCamelCase : List[str] = name.replace('''proj''' , '''projection''' )
if "blocks" in name:
_lowerCamelCase : List[str] = name.replace('''blocks''' , '''layer''' )
if "mlp.fc1" in name:
_lowerCamelCase : List[str] = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
if "mlp.fc2" in name:
_lowerCamelCase : str = name.replace('''mlp.fc2''' , '''output.dense''' )
if "norm1" in name and "backbone" not in name:
_lowerCamelCase : int = name.replace('''norm1''' , '''layernorm_before''' )
if "norm2" in name and "backbone" not in name:
_lowerCamelCase : str = name.replace('''norm2''' , '''layernorm_after''' )
if "scratch.output_conv" in name:
_lowerCamelCase : List[Any] = name.replace('''scratch.output_conv''' , '''head''' )
if "scratch" in name:
_lowerCamelCase : List[Any] = name.replace('''scratch''' , '''neck''' )
if "layer1_rn" in name:
_lowerCamelCase : Optional[int] = name.replace('''layer1_rn''' , '''convs.0''' )
if "layer2_rn" in name:
_lowerCamelCase : Any = name.replace('''layer2_rn''' , '''convs.1''' )
if "layer3_rn" in name:
_lowerCamelCase : Dict = name.replace('''layer3_rn''' , '''convs.2''' )
if "layer4_rn" in name:
_lowerCamelCase : List[str] = name.replace('''layer4_rn''' , '''convs.3''' )
if "refinenet" in name:
_lowerCamelCase : str = int(name[len('''neck.refinenet''' ) : len('''neck.refinenet''' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
_lowerCamelCase : Optional[int] = name.replace(F'''refinenet{layer_idx}''' , F'''fusion_stage.layers.{abs(layer_idx-4 )}''' )
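        # e.g. refinenet4 -> fusion_stage.layers.0 and refinenet1 -> fusion_stage.layers.3,
        # since abs(layer_idx - 4) reverses the 1-based refinenet ordering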
if "out_conv" in name:
_lowerCamelCase : Dict = name.replace('''out_conv''' , '''projection''' )
if "resConfUnit1" in name:
_lowerCamelCase : Optional[int] = name.replace('''resConfUnit1''' , '''residual_layer1''' )
if "resConfUnit2" in name:
_lowerCamelCase : int = name.replace('''resConfUnit2''' , '''residual_layer2''' )
if "conv1" in name:
_lowerCamelCase : Optional[int] = name.replace('''conv1''' , '''convolution1''' )
if "conv2" in name:
_lowerCamelCase : Optional[Any] = name.replace('''conv2''' , '''convolution2''' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
_lowerCamelCase : Optional[Any] = name.replace('''pretrained.act_postprocess1.0.project.0''' , '''neck.reassemble_stage.readout_projects.0.0''' )
if "pretrained.act_postprocess2.0.project.0" in name:
_lowerCamelCase : str = name.replace('''pretrained.act_postprocess2.0.project.0''' , '''neck.reassemble_stage.readout_projects.1.0''' )
if "pretrained.act_postprocess3.0.project.0" in name:
_lowerCamelCase : Optional[Any] = name.replace('''pretrained.act_postprocess3.0.project.0''' , '''neck.reassemble_stage.readout_projects.2.0''' )
if "pretrained.act_postprocess4.0.project.0" in name:
_lowerCamelCase : Optional[int] = name.replace('''pretrained.act_postprocess4.0.project.0''' , '''neck.reassemble_stage.readout_projects.3.0''' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
_lowerCamelCase : List[str] = name.replace('''pretrained.act_postprocess1.3''' , '''neck.reassemble_stage.layers.0.projection''' )
if "pretrained.act_postprocess1.4" in name:
_lowerCamelCase : Optional[Any] = name.replace('''pretrained.act_postprocess1.4''' , '''neck.reassemble_stage.layers.0.resize''' )
if "pretrained.act_postprocess2.3" in name:
_lowerCamelCase : Optional[Any] = name.replace('''pretrained.act_postprocess2.3''' , '''neck.reassemble_stage.layers.1.projection''' )
if "pretrained.act_postprocess2.4" in name:
_lowerCamelCase : List[str] = name.replace('''pretrained.act_postprocess2.4''' , '''neck.reassemble_stage.layers.1.resize''' )
if "pretrained.act_postprocess3.3" in name:
_lowerCamelCase : Dict = name.replace('''pretrained.act_postprocess3.3''' , '''neck.reassemble_stage.layers.2.projection''' )
if "pretrained.act_postprocess4.3" in name:
_lowerCamelCase : Tuple = name.replace('''pretrained.act_postprocess4.3''' , '''neck.reassemble_stage.layers.3.projection''' )
if "pretrained.act_postprocess4.4" in name:
_lowerCamelCase : Union[str, Any] = name.replace('''pretrained.act_postprocess4.4''' , '''neck.reassemble_stage.layers.3.resize''' )
if "pretrained" in name:
_lowerCamelCase : str = name.replace('''pretrained''' , '''dpt''' )
if "bn" in name:
_lowerCamelCase : Tuple = name.replace('''bn''' , '''batch_norm''' )
if "head" in name:
_lowerCamelCase : Dict = name.replace('''head''' , '''head.head''' )
if "encoder.norm" in name:
_lowerCamelCase : List[str] = name.replace('''encoder.norm''' , '''layernorm''' )
if "auxlayer" in name:
_lowerCamelCase : List[Any] = name.replace('''auxlayer''' , '''auxiliary_head.head''' )
if "backbone" in name:
_lowerCamelCase : Union[str, Any] = name.replace('''backbone''' , '''backbone.bit.encoder''' )
if ".." in name:
_lowerCamelCase : Optional[int] = name.replace('''..''' , '''.''' )
if "stem.conv" in name:
_lowerCamelCase : Union[str, Any] = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
_lowerCamelCase : Optional[Any] = name.replace('''blocks''' , '''layers''' )
if "convolution" in name and "backbone" in name:
_lowerCamelCase : str = name.replace('''convolution''' , '''conv''' )
if "layer" in name and "backbone" in name:
_lowerCamelCase : int = name.replace('''layer''' , '''layers''' )
if "backbone.bit.encoder.bit" in name:
_lowerCamelCase : Optional[int] = name.replace('''backbone.bit.encoder.bit''' , '''backbone.bit''' )
if "embedder.conv" in name:
_lowerCamelCase : List[str] = name.replace('''embedder.conv''' , '''embedder.convolution''' )
if "backbone.bit.encoder.stem.norm" in name:
_lowerCamelCase : List[Any] = name.replace('''backbone.bit.encoder.stem.norm''' , '''backbone.bit.embedder.norm''' )
return name
def read_in_q_k_v( state_dict , config ) ->List[str]:
    for i in range(config.num_hidden_layers ):
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(F'''dpt.encoder.layer.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[: config.hidden_size, :]
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''dpt.encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
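# Shape sketch (added commentary): the fused qkv projection has shape
# (3 * hidden_size, hidden_size); rows [0, hidden_size) hold the query,
# rows [hidden_size, 2 * hidden_size) the key, and the last hidden_size
# rows the value, which is exactly how the three slices above carve it up.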
def prepare_img( ) ->Any:
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dpt_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub , model_name , show_prediction ) ->int:
    config, expected_shape = get_dpt_config(checkpoint_url )
    # load original state_dict from URL
    # state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    state_dict = torch.load(checkpoint_url , map_location='''cpu''' )
    # remove certain keys
    remove_ignore_keys_(state_dict )
    # rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val
    # read in qkv matrices
    read_in_q_k_v(state_dict , config )
    # load HuggingFace model
    model = DPTForSemanticSegmentation(config ) if '''ade''' in checkpoint_url else DPTForDepthEstimation(config )
    model.load_state_dict(state_dict )
    model.eval()
    # Check outputs on an image
    size = 480 if '''ade''' in checkpoint_url else 384
    image_processor = DPTImageProcessor(size=size )
    image = prepare_img()
    encoding = image_processor(image , return_tensors='''pt''' )
    # forward pass
    outputs = model(**encoding ).logits if '''ade''' in checkpoint_url else model(**encoding ).predicted_depth
    if show_prediction:
        prediction = (
            torch.nn.functional.interpolate(
                outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='''bicubic''' , align_corners=False , )
            .squeeze()
            .cpu()
            .numpy()
        )
        Image.fromarray((prediction / prediction.max()) * 255 ).show()
    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        print(F'''Saving model to {pytorch_dump_folder_path}''' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        model.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
        image_processor.push_to_hub('''ybelkada/dpt-hybrid-midas''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt',
type=str,
help='URL of the original DPT checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
required=False,
help='Path to the output PyTorch model directory.',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
)
parser.add_argument(
'--model_name',
default='dpt-large',
type=str,
help='Name of the model, in case you\'re pushing to the hub.',
)
parser.add_argument(
'--show_prediction',
action='store_true',
)
    args = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
| 434 |
"""simple docstring"""
class SubArray:
    """simple docstring"""

    def __init__( self , arr ) -> None:
        # the input is a comma-separated string, so split it into a list
        self.array = arr.split(''',''' )

    def solve_sub_array( self ) -> int:
        # Kadane's algorithm: sum_value[i] is the best subarray sum ending at
        # index i, rear[i] is the best subarray sum seen in array[: i + 1]
        sum_value = [int(self.array[0] )] * len(self.array )
        rear = [int(self.array[0] )] * len(self.array )
        for i in range(1 , len(self.array ) ):
            sum_value[i] = max(
                int(self.array[i] ) + sum_value[i - 1] , int(self.array[i] ) )
            rear[i] = max(sum_value[i] , rear[i - 1] )
        return rear[len(self.array ) - 1]


if __name__ == "__main__":
    whole_array = input('please input some numbers:')
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(('the result is:', re))
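# A minimal hedged sketch (added for illustration, not part of the original
# snippet): the same Kadane recurrence on a plain list of ints, without the
# comma-separated string parsing above.
def kadane_max_subarray_sum(values: list) -> int:
    best_ending_here = best_overall = values[0]
    for value in values[1:]:
        best_ending_here = max(best_ending_here + value, value)
        best_overall = max(best_overall, best_ending_here)
    return best_overall


assert kadane_max_subarray_sum([1, -3, 4, -1, 2]) == 5  # subarray [4, -1, 2]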
| 434 | 1 |
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
__a = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
class __a( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
def a__ ( self ) -> Dict:
super().setUp()
UpperCAmelCase_ : Any = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est''']
UpperCAmelCase_ : str = dict(zip(_SCREAMING_SNAKE_CASE ,range(len(_SCREAMING_SNAKE_CASE ) ) ) )
UpperCAmelCase_ : str = {'''unk_token''': '''<unk>'''}
UpperCAmelCase_ : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''monolingual_vocab_file'''] )
with open(self.monolingual_vocab_file ,'''w''' ,encoding='''utf-8''' ) as fp:
for token in vocab_tokens:
fp.write(f'''{token} {vocab_tokens[token]}\n''' )
UpperCAmelCase_ : Optional[Any] = BartphoTokenizer(_SCREAMING_SNAKE_CASE ,self.monolingual_vocab_file ,**self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self ,**_SCREAMING_SNAKE_CASE ) -> Any:
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname ,**_SCREAMING_SNAKE_CASE )
def a__ ( self ,_SCREAMING_SNAKE_CASE ) -> Optional[int]:
UpperCAmelCase_ : Any = '''This is a là test'''
UpperCAmelCase_ : int = '''This is a<unk><unk> test'''
return input_text, output_text
def a__ ( self ) -> str:
UpperCAmelCase_ : Dict = BartphoTokenizer(_SCREAMING_SNAKE_CASE ,self.monolingual_vocab_file ,**self.special_tokens_map )
UpperCAmelCase_ : Dict = '''This is a là test'''
UpperCAmelCase_ : int = '''▁This ▁is ▁a ▁l à ▁t est'''.split()
UpperCAmelCase_ : List[Any] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = tokens + [tokenizer.unk_token]
UpperCAmelCase_ : str = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) ,_SCREAMING_SNAKE_CASE )
| 719 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__a = {'configuration_mra': ['MRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MraConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__a = [
'MRA_PRETRAINED_MODEL_ARCHIVE_LIST',
'MraForMaskedLM',
'MraForMultipleChoice',
'MraForQuestionAnswering',
'MraForSequenceClassification',
'MraForTokenClassification',
'MraLayer',
'MraModel',
'MraPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
__a = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 300 | 0 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders( accelerator: Accelerator , batch_size: int = 16 ) -> Union[str, Any]:
    tokenizer = AutoTokenizer.from_pretrained('''bert-base-cased''' )
    datasets = load_dataset('''glue''' , '''mrpc''' )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=True , max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('''label''' , '''labels''' )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples , padding='''longest''' , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors='''pt''' , )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['''train'''] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size , drop_last=True )
    eval_dataloader = DataLoader(
        tokenized_datasets['''validation'''] , shuffle=False , collate_fn=collate_fn , batch_size=EVAL_BATCH_SIZE , drop_last=(accelerator.mixed_precision == '''fp8''') , )

    return train_dataloader, eval_dataloader
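# Hedged sketch (added for illustration; `_padded_length` is a hypothetical
# helper, not part of the original script) of what `pad_to_multiple_of` does:
# the longest sequence in the batch sets the base length, which is rounded up
# so tensor cores / XLA see uniform, hardware-friendly shapes.
def _padded_length(longest: int, multiple: int) -> int:
    return -(-longest // multiple) * multiple  # ceiling division
# e.g. _padded_length(115, 8) == 120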
def training_function( config , args ) -> Union[str, Any]:
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['''lr''']
    num_epochs = int(config['''num_epochs'''] )
    seed = int(config['''seed'''] )
    batch_size = int(config['''batch_size'''] )

    metric = evaluate.load('''glue''' , '''mrpc''' )

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator , batch_size )

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=True )

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device )

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters() , lr=lr )

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer , num_warmup_steps=100 , num_training_steps=(len(train_dataloader ) * num_epochs) // gradient_accumulation_steps , )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model , optimizer , train_dataloader , eval_dataloader , lr_scheduler )

    # Now we train the model
    for epoch in range(num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            predictions, references = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
            metric.add_batch(
                predictions=predictions , references=references , )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:' , eval_metric )
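# Hedged note (added commentary, not part of the original script): dividing
# each micro-batch loss by `gradient_accumulation_steps` before `backward()`
# makes the accumulated gradient match one step on the full batch, i.e.
# effective_batch_size = batch_size * gradient_accumulation_steps.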
def SCREAMING_SNAKE_CASE ( ) -> Optional[int]:
UpperCAmelCase_ = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__UpperCamelCase , default=__UpperCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
UpperCAmelCase_ = parser.parse_args()
UpperCAmelCase_ = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(__UpperCamelCase , __UpperCamelCase )
if __name__ == "__main__":
main()
| 144 |
from __future__ import annotations
def max_sum_in_array(array: list[int] , k: int ) -> int:
    if len(array ) < k or k < 0:
        raise ValueError("Invalid Input" )
    max_sum = current_sum = sum(array[:k] )
    for i in range(len(array ) - k ):
        # slide the window: drop the element leaving it, add the one entering it
        current_sum = current_sum - array[i] + array[i + k]
        max_sum = max(max_sum , current_sum )
    return max_sum
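# Hedged worked example (added for illustration, not part of the original):
# for [1, 2, 3, 4] with k = 2 the window sums are 3, 5, 7, and each new sum
# reuses the previous one in O(1) instead of re-summing the whole window.
assert max_sum_in_array([1, 2, 3, 4], 2) == 7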
if __name__ == "__main__":
from doctest import testmod
from random import randint
testmod()
    array = [randint(-1000, 1000) for i in range(100)]
    k = randint(0, 110)
print(F'''The maximum sum of {k} consecutive elements is {max_sum_in_array(array,k)}''')
| 243 | 0 |
'''simple docstring'''
from itertools import permutations
def is_substring_divisible( num ) -> bool:
"""simple docstring"""
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests ):
if (num[i + 4] * 1_00 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
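# Hedged sanity check (added for illustration): Project Euler 43's worked
# example, 1406357289, satisfies every sub-string divisibility condition.
assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))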
def solution( n = 10 ) -> int:
    """simple docstring"""
    return sum(
        int("""""".join(map(str , num ) ) )
        for num in permutations(range(n ) )
        if is_substring_divisible(num ) )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 718 |
'''simple docstring'''
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class _a (AbstractFileSystem):
    """simple docstring"""

    root_marker = ''
    protocol = 'hf-legacy'  # "hf://" is reserved for hffs
    def __init__( self , repo_info = None , token = None , **A__ , ):
        super().__init__(self , **A__ )
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs( self ):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    """name""": hf_file.rfilename,
                    """size""": None,
                    """type""": """file""",
                }
                self.dir_cache.update(
                    {
                        str(d ): {"""name""": str(d ), """size""": None, """type""": """directory"""}
                        for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
                    } )
def UpperCamelCase ( self , A__ , A__ = "rb" , **A__ , ) -> Optional[int]:
if not isinstance(self.repo_info , A__ ):
raise NotImplementedError(F"Open is only implemented for dataset repositories, but got {self.repo_info}" )
_SCREAMING_SNAKE_CASE = hf_hub_url(self.repo_info.id , A__ , revision=self.repo_info.sha )
return fsspec.open(
A__ , mode=A__ , headers=get_authentication_headers_for_url(A__ , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
    def info( self , path , **A__ ):
        self._get_dirs()
        path = self._strip_protocol(path )
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path )
    def ls( self , path , detail=False , **A__ ):
        self._get_dirs()
        path = PurePosixPath(path.strip("""/""" ) )
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("""/""" ) )
            root = p.parent
            if root == path:
                paths[str(p )] = f
        out = list(paths.values() )
        if detail:
            return out
        else:
            return sorted(f["""name"""] for f in out )
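# Hedged usage sketch (added for illustration; `dataset_info` and the file
# name are assumed, not defined in this module):
#
#   fs = _a(repo_info=dataset_info, token=hf_token)
#   fs.ls("")                              # top-level entries of the repo
#   with fs.open("data/train.csv") as f:   # hypothetical file name
#       header = f.readline()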
| 0 | 0 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest( TokenizerTesterMixin, unittest.TestCase ):
    '''simple docstring'''

    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
def A__ ( self ):
super().setUp()
UpperCAmelCase_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"こんにちは",
"こん",
"にちは",
"ばんは",
"##こん",
"##にちは",
"##ばんは",
"世界",
"##世界",
"、",
"##、",
"。",
"##。",
]
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = "こんにちは、世界。 \nこんばんは、世界。"
UpperCAmelCase_ = "こんにちは 、 世界 。 こんばんは 、 世界 。"
return input_text, output_text
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = self.get_input_output_texts(UpperCAmelCase_ )
UpperCAmelCase_ = tokenizer.encode(UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ )
UpperCAmelCase_ = tokenizer.decode(UpperCAmelCase_ , clean_up_tokenization_spaces=UpperCAmelCase_ )
return text, ids
def A__ ( self ):
pass # TODO add if relevant
def A__ ( self ):
pass # TODO add if relevant
def A__ ( self ):
pass # TODO add if relevant
def A__ ( self ):
UpperCAmelCase_ = self.tokenizer_class(self.vocab_file )
UpperCAmelCase_ = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。" )
self.assertListEqual(UpperCAmelCase_ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def A__ ( self ):
UpperCAmelCase_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab" )
self.assertIsNotNone(UpperCAmelCase_ )
UpperCAmelCase_ = "こんにちは、世界。\nこんばんは、世界。"
UpperCAmelCase_ = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
UpperCAmelCase_ = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(UpperCAmelCase_ , "wb" ) as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
with open(UpperCAmelCase_ , "rb" ) as handle:
UpperCAmelCase_ = pickle.load(UpperCAmelCase_ )
UpperCAmelCase_ = tokenizer_new.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
def A__ ( self ):
UpperCAmelCase_ = MecabTokenizer(mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def A__ ( self ):
try:
UpperCAmelCase_ = MecabTokenizer(mecab_dic="unidic_lite" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def A__ ( self ):
try:
UpperCAmelCase_ = MecabTokenizer(mecab_dic="unidic" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def A__ ( self ):
UpperCAmelCase_ = MecabTokenizer(do_lower_case=UpperCAmelCase_ , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def A__ ( self ):
try:
UpperCAmelCase_ = MecabTokenizer(
do_lower_case=UpperCAmelCase_ , normalize_text=UpperCAmelCase_ , mecab_option="-d /usr/local/lib/mecab/dic/jumandic" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )
def A__ ( self ):
UpperCAmelCase_ = MecabTokenizer(normalize_text=UpperCAmelCase_ , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )
@require_sudachi
def A__ ( self ):
UpperCAmelCase_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi" )
self.assertIsNotNone(UpperCAmelCase_ )
UpperCAmelCase_ = "こんにちは、世界。\nこんばんは、世界。"
UpperCAmelCase_ = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
UpperCAmelCase_ = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(UpperCAmelCase_ , "wb" ) as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
with open(UpperCAmelCase_ , "rb" ) as handle:
UpperCAmelCase_ = pickle.load(UpperCAmelCase_ )
UpperCAmelCase_ = tokenizer_new.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@require_sudachi
def A__ ( self ):
UpperCAmelCase_ = SudachiTokenizer(sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def A__ ( self ):
UpperCAmelCase_ = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] )
@require_sudachi
def A__ ( self ):
UpperCAmelCase_ = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] )
@require_sudachi
def A__ ( self ):
UpperCAmelCase_ = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] )
@require_sudachi
def A__ ( self ):
UpperCAmelCase_ = SudachiTokenizer(do_lower_case=UpperCAmelCase_ , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def A__ ( self ):
UpperCAmelCase_ = SudachiTokenizer(normalize_text=UpperCAmelCase_ , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )
@require_sudachi
def A__ ( self ):
UpperCAmelCase_ = SudachiTokenizer(trim_whitespace=UpperCAmelCase_ , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
@require_jumanpp
def A__ ( self ):
UpperCAmelCase_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp" )
self.assertIsNotNone(UpperCAmelCase_ )
UpperCAmelCase_ = "こんにちは、世界。\nこんばんは、世界。"
UpperCAmelCase_ = tokenizer.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
UpperCAmelCase_ = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(UpperCAmelCase_ , "wb" ) as handle:
pickle.dump(UpperCAmelCase_ , UpperCAmelCase_ )
with open(UpperCAmelCase_ , "rb" ) as handle:
UpperCAmelCase_ = pickle.load(UpperCAmelCase_ )
UpperCAmelCase_ = tokenizer_new.tokenize(UpperCAmelCase_ )
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@require_jumanpp
def A__ ( self ):
UpperCAmelCase_ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def A__ ( self ):
UpperCAmelCase_ = JumanppTokenizer(do_lower_case=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def A__ ( self ):
UpperCAmelCase_ = JumanppTokenizer(normalize_text=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def A__ ( self ):
UpperCAmelCase_ = JumanppTokenizer(trim_whitespace=UpperCAmelCase_ )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )
@require_jumanpp
def A__ ( self ):
UpperCAmelCase_ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )
def A__ ( self ):
UpperCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
UpperCAmelCase_ = {}
for i, token in enumerate(UpperCAmelCase_ ):
UpperCAmelCase_ = i
UpperCAmelCase_ = WordpieceTokenizer(vocab=UpperCAmelCase_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こんにちは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは" ) , ["こん", "##ばんは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) , ["こん", "##ばんは", "[UNK]", "こんにちは"] )
def A__ ( self ):
UpperCAmelCase_ = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp" )
UpperCAmelCase_ = tokenizer.subword_tokenizer
UpperCAmelCase_ = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。" )
self.assertListEqual(UpperCAmelCase_ , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"] )
UpperCAmelCase_ = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは" )
self.assertListEqual(UpperCAmelCase_ , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"] )
def A__ ( self ):
UpperCAmelCase_ = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese" )
UpperCAmelCase_ = tokenizer.encode("ありがとう。" , add_special_tokens=UpperCAmelCase_ )
UpperCAmelCase_ = tokenizer.encode("どういたしまして。" , add_special_tokens=UpperCAmelCase_ )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest( TokenizerTesterMixin, unittest.TestCase ):
    '''simple docstring'''

    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
def A__ ( self ):
super().setUp()
UpperCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def A__ ( self , **lowerCAmelCase ):
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="character" , **UpperCAmelCase_ )
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = "こんにちは、世界。 \nこんばんは、世界。"
UpperCAmelCase_ = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
return input_text, output_text
def A__ ( self ):
pass # TODO add if relevant
def A__ ( self ):
pass # TODO add if relevant
def A__ ( self ):
pass # TODO add if relevant
def A__ ( self ):
UpperCAmelCase_ = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="character" )
UpperCAmelCase_ = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。" )
self.assertListEqual(
UpperCAmelCase_ , ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def A__ ( self ):
UpperCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
UpperCAmelCase_ = {}
for i, token in enumerate(UpperCAmelCase_ ):
UpperCAmelCase_ = i
UpperCAmelCase_ = CharacterTokenizer(vocab=UpperCAmelCase_ , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こ", "ん", "に", "ち", "は"] )
self.assertListEqual(tokenizer.tokenize("こんにちほ" ) , ["こ", "ん", "に", "ち", "[UNK]"] )
def A__ ( self ):
UpperCAmelCase_ = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char" )
UpperCAmelCase_ = tokenizer.encode("ありがとう。" , add_special_tokens=UpperCAmelCase_ )
UpperCAmelCase_ = tokenizer.encode("どういたしまして。" , add_special_tokens=UpperCAmelCase_ )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_ )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class AutoTokenizerCustomTest( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ):
UpperCAmelCase_ = "cl-tohoku/bert-base-japanese"
UpperCAmelCase_ = AutoTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
class BertTokenizerMismatchTest( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ):
UpperCAmelCase_ = "cl-tohoku/bert-base-japanese"
with self.assertLogs("transformers" , level="WARNING" ) as cm:
BertTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
UpperCAmelCase_ = "bert-base-cased"
with self.assertLogs("transformers" , level="WARNING" ) as cm:
BertJapaneseTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
| 579 |
from typing import Callable, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case = logging.get_logger(__name__)
snake_case = {
"""microsoft/xprophetnet-large-wiki100-cased""": (
"""https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/config.json"""
),
}
class SCREAMING_SNAKE_CASE ( PretrainedConfig ):
    '''simple docstring'''

    model_type = '''xlm-prophetnet'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''num_attention_heads''': '''num_encoder_attention_heads''',
    }
    def __init__(
        self,
        activation_dropout=0.1,
        activation_function="gelu",
        vocab_size=30522,
        hidden_size=1024,
        encoder_ffn_dim=4096,
        num_encoder_layers=12,
        num_encoder_attention_heads=16,
        decoder_ffn_dim=4096,
        num_decoder_layers=12,
        num_decoder_attention_heads=16,
        attention_dropout=0.1,
        dropout=0.1,
        max_position_embeddings=512,
        init_std=0.02,
        is_encoder_decoder=True,
        add_cross_attention=True,
        decoder_start_token_id=0,
        ngram=2,
        num_buckets=32,
        relative_max_distance=128,
        disable_ngram_loss=False,
        eps=0.0,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.encoder_ffn_dim = encoder_ffn_dim
        self.num_encoder_layers = num_encoder_layers
        self.num_encoder_attention_heads = num_encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.num_decoder_layers = num_decoder_layers
        self.num_decoder_attention_heads = num_decoder_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.init_std = init_std  # Normal(0, this parameter)
        self.activation_function = activation_function
        # parameters for xlmprophetnet
        self.ngram = ngram
        self.num_buckets = num_buckets
        self.relative_max_distance = relative_max_distance
        self.disable_ngram_loss = disable_ngram_loss
        self.eps = eps
        # 3 Types of Dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.dropout = dropout
        self.use_cache = use_cache
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , add_cross_attention=add_cross_attention , decoder_start_token_id=decoder_start_token_id , **kwargs , )
@property
    def num_hidden_layers( self ) -> int:
return self.num_encoder_layers + self.num_decoder_layers
@num_hidden_layers.setter
    def num_hidden_layers( self , value ):
raise NotImplementedError(
"This model does not support the setting of `num_hidden_layers`. Please set `num_encoder_layers` and"
" `num_decoder_layers`." )
| 62 | 0 |
def different_signs( numa : int , numb : int ) -> bool:
    """simple docstring"""
    # two ints have opposite signs iff their XOR is negative (two's complement sign bit)
    return numa ^ numb < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
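    # Hedged illustration (added, not part of the original snippet): XOR of
    # two ints is negative exactly when their sign bits differ.
    assert different_signs(1, -1) is True
    assert different_signs(-10, -5) is False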
| 447 |
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import MaMaaaTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from transformers.utils import is_sentencepiece_available
if is_sentencepiece_available():
from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json
from ...test_tokenization_common import TokenizerTesterMixin
if is_sentencepiece_available():
_lowerCamelCase = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
_lowerCamelCase = 128022
_lowerCamelCase = 128028
@require_sentencepiece
class _SCREAMING_SNAKE_CASE (TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = MaMaaaTokenizer
    test_rust_tokenizer = False
    test_seqaseq = False
    test_sentencepiece = True
def __snake_case ( self : Union[str, Any] )->Optional[Any]:
super().setUp()
__SCREAMING_SNAKE_CASE : int = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"]
__SCREAMING_SNAKE_CASE : List[str] = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
__SCREAMING_SNAKE_CASE : int = Path(self.tmpdirname )
save_json(UpperCamelCase , save_dir / VOCAB_FILES_NAMES["vocab_file"] )
if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
copyfile(UpperCamelCase , save_dir / VOCAB_FILES_NAMES["spm_file"] )
__SCREAMING_SNAKE_CASE : Any = MaMaaaTokenizer.from_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname )
def __snake_case ( self : Optional[int] , **UpperCamelCase : Any )->Dict:
return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase )
def __snake_case ( self : Dict , UpperCamelCase : List[str] )->int:
return (
"This is a test",
"This is a test",
)
def __snake_case ( self : str )->Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = "</s>"
__SCREAMING_SNAKE_CASE : Optional[int] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase )
def __snake_case ( self : Tuple )->Optional[Any]:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Any = list(tokenizer.get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "</s>" )
self.assertEqual(vocab_keys[1] , "<unk>" )
self.assertEqual(vocab_keys[-1] , "<s>" )
self.assertEqual(len(UpperCamelCase ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) )
@unittest.skip("Skip this test while all models are still to be uploaded." )
def __snake_case ( self : Dict )->Dict:
pass
def __snake_case ( self : Union[str, Any] )->Union[str, Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer.tokenize("This is a test" )
self.assertListEqual(UpperCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [2, 3, 4, 5, 6] , )
__SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] )
self.assertListEqual(UpperCamelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
__SCREAMING_SNAKE_CASE : Any = tokenizer.convert_tokens_to_string(UpperCamelCase )
self.assertEqual(UpperCamelCase , "This is a test" )
@slow
def __snake_case ( self : Any )->Union[str, Any]:
# fmt: off
__SCREAMING_SNAKE_CASE : Tuple = {"input_ids": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCamelCase , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , )
@require_torch
@require_sentencepiece
@require_tokenizers
class _SCREAMING_SNAKE_CASE (unittest.TestCase ):
lowerCAmelCase = """facebook/m2m100_418M"""
lowerCAmelCase = [
"""In my opinion, there are two levels of response from the French government.""",
"""NSA Affair Emphasizes Complete Lack of Debate on Intelligence""",
]
lowerCAmelCase = [
"""Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.""",
"""L'affaire NSA souligne l'absence totale de débat sur le renseignement""",
]
# fmt: off
lowerCAmelCase = [EN_CODE, 593, 1949, 11_5781, 4, 7_1586, 4234, 6_0633, 12_6233, 432, 12_3808, 1_5592, 1197, 11_7132, 12_0618, 5, 2]
@classmethod
def __snake_case ( cls : List[Any] )->Union[str, Any]:
        cls.tokenizer: MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="en" , tgt_lang="fr" )
        cls.pad_token_id = 1
return cls
def __snake_case ( self : Dict )->Any:
self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 1_2_8_0_0_6 )
self.assertEqual(self.tokenizer.get_lang_id("en" ) , 1_2_8_0_2_2 )
self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 1_2_8_0_7_6 )
self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 1_2_8_0_6_3 )
def __snake_case ( self : Any )->Union[str, Any]:
__SCREAMING_SNAKE_CASE : str = self.tokenizer.get_vocab()
self.assertEqual(len(UpperCamelCase ) , self.tokenizer.vocab_size )
self.assertEqual(vocab["<unk>"] , 3 )
self.assertIn(self.tokenizer.get_lang_token("en" ) , UpperCamelCase )
def __snake_case ( self : List[Any] )->str:
__SCREAMING_SNAKE_CASE : Union[str, Any] = "en"
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCamelCase )
def __snake_case ( self : Union[str, Any] )->List[Any]:
self.assertIn(UpperCamelCase , self.tokenizer.all_special_ids )
# fmt: off
__SCREAMING_SNAKE_CASE : Dict = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2]
# fmt: on
__SCREAMING_SNAKE_CASE : int = self.tokenizer.decode(UpperCamelCase , skip_special_tokens=UpperCamelCase )
__SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCamelCase )
self.assertEqual(UpperCamelCase , UpperCamelCase )
self.assertNotIn(self.tokenizer.eos_token , UpperCamelCase )
def __snake_case ( self : Any )->List[Any]:
__SCREAMING_SNAKE_CASE : int = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer.lang_token_to_id
self.tokenizer.save_pretrained(UpperCamelCase )
__SCREAMING_SNAKE_CASE : Any = MaMaaaTokenizer.from_pretrained(UpperCamelCase )
self.assertDictEqual(new_tok.lang_token_to_id , UpperCamelCase )
@require_torch
def __snake_case ( self : Any )->int:
__SCREAMING_SNAKE_CASE : List[str] = "en"
__SCREAMING_SNAKE_CASE : Dict = "fr"
__SCREAMING_SNAKE_CASE : str = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCamelCase , return_tensors="pt" )
__SCREAMING_SNAKE_CASE : Optional[int] = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id )
for k in batch:
__SCREAMING_SNAKE_CASE : Union[str, Any] = batch[k].tolist()
# batch = {k: v.tolist() for k,v in batch.items()}
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
# batch.decoder_inputs_ids[0][0] ==
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == FR_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2] == [2, FR_CODE]
@require_torch
def __snake_case ( self : str )->int:
__SCREAMING_SNAKE_CASE : Optional[Any] = "mr"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
__SCREAMING_SNAKE_CASE : Optional[int] = "zh"
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
@require_torch
def __snake_case ( self : Optional[Any] )->List[Any]:
__SCREAMING_SNAKE_CASE : Union[str, Any] = "mr"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
__SCREAMING_SNAKE_CASE : Optional[int] = "zh"
self.tokenizer._switch_to_target_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] )
self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
self.tokenizer._switch_to_input_mode()
self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] )
@require_torch
def __snake_case ( self : Tuple )->Optional[int]:
__SCREAMING_SNAKE_CASE : Any = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" )
self.assertEqual(
nested_simplify(UpperCamelCase ) , {
# en_XX, A, test, EOS
"input_ids": [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 1_2_8_0_0_6,
} , )
| 447 | 1 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
WATERMARK_MESSAGE = 0b101_100_111_110_110_010_010_000_011_110_111_011_000_110_011_110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
WATERMARK_BITS = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
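# e.g. for a small illustrative value: bin(0b1011)[2:] == "1011" -> [1, 0, 1, 1]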
class a :
def __init__( self ):
'''simple docstring'''
        self.watermark = WATERMARK_BITS
        self.encoder = WatermarkEncoder()
self.encoder.set_watermark("bits" , self.watermark )
    def apply_watermark( self , images ):
        '''simple docstring'''
        # can't watermark images that are smaller than 256 pixels
        if images.shape[-1] < 256:
            return images
        images = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        images = [self.encoder.encode(image , "dwtDct" ) for image in images]
        images = torch.from_numpy(np.array(images ) ).permute(0 , 3 , 1 , 2 )
        images = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
        return images
| 300 |
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float] ) -> list[float]:
    result = []
    arr_size = len(arr )
    for i in range(arr_size ):
        next_element: float = -1
        for j in range(i + 1 , arr_size ):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element )
    return result
def next_greatest_element_fast(arr: list[float] ) -> list[float]:
    result = []
    for i, outer in enumerate(arr ):
        next_element: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_element = inner
                break
        result.append(next_element )
    return result
def next_greatest_element(arr: list[float] ) -> list[float]:
    arr_size = len(arr )
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    # Scanning right-to-left, the stack keeps later elements in increasing
    # order from top to bottom; popping everything <= arr[index] leaves the
    # nearest strictly greater element to the right (if any) on top.
    for index in reversed(range(arr_size ) ):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index] )
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
        'from __main__ import arr, next_greatest_element_slow, '
        'next_greatest_element_fast, next_greatest_element'
    )
print(
'next_greatest_element_slow():',
timeit('next_greatest_element_slow(arr)', setup=setup),
)
print(
'next_greatest_element_fast():',
timeit('next_greatest_element_fast(arr)', setup=setup),
)
print(
' next_greatest_element():',
timeit('next_greatest_element(arr)', setup=setup),
)
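
    # Worked example (added for illustration): for [2, 7, 3, 5, 4, 6, 8] the next
    # strictly greater element to the right of each entry is [7, 8, 5, 6, 6, 8, -1];
    # all three implementations agree, the stack variant computing it in O(n).
    assert next_greatest_element([2, 7, 3, 5, 4, 6, 8]) == [7, 8, 5, 6, 6, 8, -1]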
| 300 | 1 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
GPTaTokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / '''utils'''))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _A ( self : int ):
# A mock response for an HTTP head request to emulate server down
UpperCamelCase :List[str] = mock.Mock()
UpperCamelCase :List[Any] = 500
UpperCamelCase :List[Any] = {}
UpperCamelCase :Tuple = HTTPError
UpperCamelCase :Union[str, Any] = {}
# Download this model to make sure it's in the cache.
UpperCamelCase :Union[str, Any] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=__lowerCamelCase ) as mock_head:
UpperCamelCase :List[str] = BertTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
# This check we did call the fake head request
mock_head.assert_called()
@require_tokenizers
def _A ( self : Union[str, Any] ):
# A mock response for an HTTP head request to emulate server down
UpperCamelCase :Union[str, Any] = mock.Mock()
UpperCamelCase :Any = 500
UpperCamelCase :Optional[Any] = {}
UpperCamelCase :Optional[int] = HTTPError
UpperCamelCase :List[Any] = {}
# Download this model to make sure it's in the cache.
UpperCamelCase :Any = GPTaTokenizerFast.from_pretrained("""gpt2""" )
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("""requests.Session.request""" , return_value=__lowerCamelCase ) as mock_head:
UpperCamelCase :List[str] = GPTaTokenizerFast.from_pretrained("""gpt2""" )
# This check we did call the fake head request
mock_head.assert_called()
def _A ( self : Optional[Any] ):
# This test is for deprecated behavior and can be removed in v5
try:
UpperCamelCase :List[Any] = tempfile.mktemp()
with open(__lowerCamelCase , """wb""" ) as f:
http_get("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" , __lowerCamelCase )
UpperCamelCase :Union[str, Any] = AlbertTokenizer.from_pretrained(__lowerCamelCase )
finally:
os.remove(__lowerCamelCase )
# Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
# the current folder and have the right name.
if os.path.isfile("""tokenizer.json""" ):
# We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
return
try:
with open("""tokenizer.json""" , """wb""" ) as f:
http_get("""https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json""" , __lowerCamelCase )
UpperCamelCase :Tuple = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""" )
# The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
self.assertEqual(tokenizer.vocab_size , 1_000 )
# Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
finally:
os.remove("""tokenizer.json""" )
def _A ( self : Dict ):
# This test is for deprecated behavior and can be removed in v5
UpperCamelCase :Optional[int] = AlbertTokenizer.from_pretrained("""https://huggingface.co/albert-base-v1/resolve/main/spiece.model""" )
@is_staging_test
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
def _A ( cls : Optional[Any] ):
UpperCamelCase :Optional[int] = TOKEN
HfFolder.save_token(__lowerCamelCase )
@classmethod
def _A ( cls : Tuple ):
try:
delete_repo(token=cls._token , repo_id="""test-tokenizer""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-tokenizer-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-tokenizer""" )
except HTTPError:
pass
def _A ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase :List[str] = os.path.join(__lowerCamelCase , """vocab.txt""" )
with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCamelCase :Any = BertTokenizer(__lowerCamelCase )
tokenizer.push_to_hub("""test-tokenizer""" , use_auth_token=self._token )
UpperCamelCase :Optional[int] = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="""test-tokenizer""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(__lowerCamelCase , repo_id="""test-tokenizer""" , push_to_hub=__lowerCamelCase , use_auth_token=self._token )
UpperCamelCase :Optional[int] = BertTokenizer.from_pretrained(F"""{USER}/test-tokenizer""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
def _A ( self : Optional[Any] ):
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase :List[Any] = os.path.join(__lowerCamelCase , """vocab.txt""" )
with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCamelCase :Optional[Any] = BertTokenizer(__lowerCamelCase )
tokenizer.push_to_hub("""valid_org/test-tokenizer-org""" , use_auth_token=self._token )
UpperCamelCase :Optional[int] = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-tokenizer-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer.save_pretrained(
__lowerCamelCase , repo_id="""valid_org/test-tokenizer-org""" , push_to_hub=__lowerCamelCase , use_auth_token=self._token )
UpperCamelCase :int = BertTokenizer.from_pretrained("""valid_org/test-tokenizer-org""" )
self.assertDictEqual(new_tokenizer.vocab , tokenizer.vocab )
@require_tokenizers
def _A ( self : Tuple ):
CustomTokenizer.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase :Optional[int] = os.path.join(__lowerCamelCase , """vocab.txt""" )
with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCamelCase :Tuple = CustomTokenizer(__lowerCamelCase )
# No fast custom tokenizer
tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token )
UpperCamelCase :List[str] = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=__lowerCamelCase )
# Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" )
# Fast and slow custom tokenizer
CustomTokenizerFast.register_for_auto_class()
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCamelCase :Dict = os.path.join(__lowerCamelCase , """vocab.txt""" )
with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) )
UpperCamelCase :Dict = BertTokenizerFast.from_pretrained(__lowerCamelCase )
bert_tokenizer.save_pretrained(__lowerCamelCase )
UpperCamelCase :List[str] = CustomTokenizerFast.from_pretrained(__lowerCamelCase )
tokenizer.push_to_hub("""test-dynamic-tokenizer""" , use_auth_token=self._token )
UpperCamelCase :List[Any] = AutoTokenizer.from_pretrained(F"""{USER}/test-dynamic-tokenizer""" , trust_remote_code=__lowerCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizerFast""" )
UpperCamelCase :List[str] = AutoTokenizer.from_pretrained(
F"""{USER}/test-dynamic-tokenizer""" , use_fast=__lowerCamelCase , trust_remote_code=__lowerCamelCase )
# Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
self.assertEqual(tokenizer.__class__.__name__ , """CustomTokenizer""" )
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def _A ( self : int ):
UpperCamelCase :Any = Trie()
trie.add("""Hello 友達""" )
self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {""" """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
trie.add("""Hello""" )
trie.data
self.assertEqual(trie.data , {"""H""": {"""e""": {"""l""": {"""l""": {"""o""": {"""""": 1, """ """: {"""友""": {"""達""": {"""""": 1}}}}}}}}} )
def _A ( self : List[str] ):
UpperCamelCase :List[Any] = Trie()
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS] This is a extra_id_100"""] )
trie.add("""[CLS]""" )
trie.add("""extra_id_1""" )
trie.add("""extra_id_100""" )
self.assertEqual(trie.split("""[CLS] This is a extra_id_100""" ) , ["""[CLS]""", """ This is a """, """extra_id_100"""] )
def _A ( self : List[str] ):
UpperCamelCase :int = Trie()
trie.add("""A""" )
self.assertEqual(trie.split("""ABC""" ) , ["""A""", """BC"""] )
self.assertEqual(trie.split("""BCA""" ) , ["""BC""", """A"""] )
def _A ( self : List[Any] ):
UpperCamelCase :int = Trie()
trie.add("""TOKEN]""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] )
def _A ( self : Tuple ):
UpperCamelCase :Dict = Trie()
trie.add("""A""" )
trie.add("""P""" )
trie.add("""[SPECIAL_TOKEN]""" )
self.assertEqual(trie.split("""This is something [SPECIAL_TOKEN]""" ) , ["""This is something """, """[SPECIAL_TOKEN]"""] )
def _A ( self : str ):
UpperCamelCase :Dict = Trie()
trie.add("""AB""" )
trie.add("""B""" )
trie.add("""C""" )
self.assertEqual(trie.split("""ABC""" ) , ["""AB""", """C"""] )
def _A ( self : List[str] ):
UpperCamelCase :Tuple = Trie()
trie.add("""ABC""" )
trie.add("""B""" )
trie.add("""CD""" )
self.assertEqual(trie.split("""ABCD""" ) , ["""ABC""", """D"""] )
def _A ( self : Tuple ):
# Even if the offsets are wrong, we necessarily output correct string
# parts.
UpperCamelCase :List[Any] = Trie()
UpperCamelCase :Dict = trie.cut_text("""ABC""" , [0, 0, 2, 1, 2, 3] )
self.assertEqual(__lowerCamelCase , ["""AB""", """C"""] )
| 590 |
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)
@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
| 590 | 1 |
def decimal_to_fraction(decimal) -> tuple[int, int]:
    """Convert a decimal (or its string form) to a reduced (numerator, denominator) pair."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        # divisor now holds gcd(numerator, denominator)
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
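
# Worked examples (added for illustration): 1.5 has one fractional digit, so it becomes
# 15/10; Euclid's algorithm gives gcd(15, 10) == 5, i.e. the reduced pair (3, 2).
assert decimal_to_fraction(1.5) == (3, 2)
assert decimal_to_fraction("0.25") == (1, 4)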
if __name__ == "__main__":
print(f'''{decimal_to_fraction(2) = }''')
print(f'''{decimal_to_fraction(8_9.0) = }''')
print(f'''{decimal_to_fraction('67') = }''')
print(f'''{decimal_to_fraction('45.0') = }''')
print(f'''{decimal_to_fraction(1.5) = }''')
print(f'''{decimal_to_fraction('6.25') = }''')
print(f'''{decimal_to_fraction('78td') = }''')
| 305 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
        "GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GPTBigCodeForSequenceClassification",
        "GPTBigCodeForTokenClassification",
        "GPTBigCodeForCausalLM",
        "GPTBigCodeModel",
        "GPTBigCodePreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
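
# Illustration only (not part of the original module): the same lazy behaviour can be
# sketched with a PEP 562 module-level __getattr__ over the _import_structure mapping:
#
#     import importlib
#
#     def __getattr__(name):
#         for submodule, names in _import_structure.items():
#             if name in names:
#                 return getattr(importlib.import_module(f".{submodule}", __name__), name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")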
| 89 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mega'] = [
        'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MegaForCausalLM',
        'MegaForMaskedLM',
        'MegaForMultipleChoice',
        'MegaForQuestionAnswering',
        'MegaForSequenceClassification',
        'MegaForTokenClassification',
        'MegaModel',
        'MegaPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 348 |
"""simple docstring"""
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1
@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        sna = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sna_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sna_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sna, map_numpy=False), expected_map_nested_sna_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sna, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sna_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sna, map_numpy=False, num_proc=num_proc), expected_map_nested_sna_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sna, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sna_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sna, num_proc=num_proc)
    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)
    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
"""iterable_length, num_proc, expected_num_proc""" , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("""input_data""" , [{}] )
def UpperCamelCase ( _A ) -> List[Any]:
lowercase : Optional[int] = NestedDataStructure(_A ).data
assert output_data == input_data
@pytest.mark.parametrize(
"""data, expected_output""" , [
({}, []),
([], []),
("""foo""", ["""foo"""]),
(["""foo""", """bar"""], ["""foo""", """bar"""]),
([["""foo""", """bar"""]], ["""foo""", """bar"""]),
([[["""foo"""], ["""bar"""]]], ["""foo""", """bar"""]),
([[["""foo"""], """bar"""]], ["""foo""", """bar"""]),
({"""a""": 1, """b""": 2}, [1, 2]),
({"""a""": [1, 2], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[1, 2]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[[3], [4]]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [[3, 4]]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, 4]}, [1, 2, 3, 4]),
({"""a""": [[[1], [2]]], """b""": [3, [4]]}, [1, 2, 3, 4]),
({"""a""": {"""1""": 1}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": 2}, [1, 2]),
({"""a""": {"""1""": [1]}, """b""": [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    example = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(example) == expected_output

    nested_example = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(nested_example) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)
def test_iflatmap_unordered():
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
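
# Quick illustration (added; not part of the test suite): map_nested walks arbitrarily
# nested dicts/lists and applies the function to every leaf, e.g.
#     map_nested(add_one, {"a": [1, 2], "b": 3}) == {"a": [2, 3], "b": 4}
# and only parallelizes once the outermost iterable reaches parallel_min_length.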
| 348 | 1 |
'''simple docstring'''
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class __magic_name__ :
def __init__( self , snake_case_ , ):
lowercase =parent
lowercase =13
lowercase =7
lowercase =30
lowercase =self.seq_length + self.mem_len
lowercase =15
lowercase =True
lowercase =True
lowercase =99
lowercase =[10, 50, 80]
lowercase =32
lowercase =32
lowercase =4
lowercase =8
lowercase =1_28
lowercase =2
lowercase =2
lowercase =None
lowercase =1
lowercase =0
lowercase =3
lowercase =self.vocab_size - 1
lowercase =0.01
def _A( self ):
lowercase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase =None
if self.use_labels:
lowercase =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase =TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def _A( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
lowercase =TFTransfoXLModel(__lowerCamelCase )
lowercase , lowercase =model(__lowerCamelCase ).to_tuple()
lowercase ={'''input_ids''': input_ids_a, '''mems''': mems_a}
lowercase , lowercase =model(__lowerCamelCase ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
lowercase =TFTransfoXLLMHeadModel(__lowerCamelCase )
lowercase , lowercase =model(__lowerCamelCase ).to_tuple()
lowercase ={'''input_ids''': input_ids_a, '''labels''': lm_labels}
lowercase , lowercase =model(__lowerCamelCase ).to_tuple()
lowercase , lowercase =model([input_ids_a, mems_a] ).to_tuple()
lowercase ={'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels}
lowercase , lowercase =model(__lowerCamelCase ).to_tuple()
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
lowercase =TFTransfoXLForSequenceClassification(__lowerCamelCase )
lowercase =model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _A( self ):
lowercase =self.prepare_config_and_inputs()
((lowercase) , (lowercase) , (lowercase) , (lowercase)) =config_and_inputs
lowercase ={'''input_ids''': input_ids_a}
return config, inputs_dict
@require_tf
class __magic_name__ ( __snake_case , __snake_case , unittest.TestCase ):
UpperCamelCase__ = (
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
UpperCamelCase__ = () if is_tf_available() else ()
UpperCamelCase__ = (
{
'feature-extraction': TFTransfoXLModel,
'text-classification': TFTransfoXLForSequenceClassification,
'text-generation': TFTransfoXLLMHeadModel,
'zero-shot': TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
UpperCamelCase__ = False
def _A( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def _A( self ):
lowercase =TFTransfoXLModelTester(self )
lowercase =ConfigTester(self , config_class=__lowerCamelCase , d_embed=37 )
def _A( self ):
self.config_tester.run_common_tests()
def _A( self ):
self.model_tester.set_seed()
lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*__lowerCamelCase )
def _A( self ):
self.model_tester.set_seed()
lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*__lowerCamelCase )
def _A( self ):
lowercase =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*__lowerCamelCase )
def _A( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class in list_other_models_with_output_ebd:
                x = model.get_output_embeddings()
                assert isinstance(x, tf.keras.layers.Layer)
                name = model.get_bias()
                assert name is None
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
def _A( self ):
pass
@slow
def _A( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase =TFTransfoXLModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
@unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
def _A( self ):
pass
@require_tf
class __magic_name__ ( unittest.TestCase ):
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
def _A( self ):
lowercase =TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
lowercase =tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
lowercase =[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
lowercase =model.generate(__lowerCamelCase , max_length=2_00 , do_sample=__lowerCamelCase )
self.assertListEqual(output_ids[0].numpy().tolist() , __lowerCamelCase )
| 72 |
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int) -> None:
    # Checks if the entire collection has been sorted
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int) -> None:
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection, index + 1)
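
# Small self-check (added for illustration): the sort works in place, inserting each
# element into the already-sorted prefix via recursive adjacent swaps.
_demo = [5, 3, 1, 4, 2]
rec_insertion_sort(_demo, len(_demo))
assert _demo == [1, 2, 3, 4, 5]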
if __name__ == "__main__":
__a = input("""Enter integers separated by spaces: """)
__a = [int(num) for num in numbers.split()]
rec_insertion_sort(number_list, len(number_list))
print(number_list)
| 377 | 0 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Count the first ``n`` sqrt(2) expansions whose numerator has more digits than the denominator."""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator

    return len(result)
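
# Sanity check (added): the expansions run 3/2, 7/5, 17/12, 41/29, ... and the first
# numerator with more digits than its denominator is the eighth, 1393/985.
assert solution(8) == 1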
if __name__ == "__main__":
print(F"""{solution() = }""")
| 617 |
'''simple docstring'''
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits, str):
        raise TypeError('number of qubits must be an integer.')
    if number_of_qubits <= 0:
        raise ValueError('number of qubits must be > 0.')
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError('number of qubits must be exact integer.')
    if number_of_qubits > 10:
        raise ValueError('number of qubits too large to simulate(>10).')

    qr = QuantumRegister(number_of_qubits, 'qr')
    cr = ClassicalRegister(number_of_qubits, 'cr')

    quantum_circuit = QuantumCircuit(qr, cr)

    counter = number_of_qubits

    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j), j, counter)

    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k, number_of_qubits - k - 1)

    # measure all the qubits
    quantum_circuit.measure(qr, cr)
    # simulate with 10000 shots
    backend = Aer.get_backend('qasm_simulator')
    job = execute(quantum_circuit, backend, shots=10_000)

    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
F"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
)
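# Sanity note (added): on the default all-zero register the QFT yields a uniform
# superposition, so the printed counts should split roughly evenly over all 2**n
# bitstrings -- about 10000 / 2**3 = 1250 shots per outcome for the 3-qubit default.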
| 617 | 1 |
def solution(n: int = 100) -> int:
    """Count the distinct terms of a**b for 2 <= a, b <= n (Project Euler 29)."""
    collect_powers = set()
    current_pow = 0
    n = n + 1  # maximum limit
    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
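
# Worked check (added for illustration): for n = 5 the terms a**b with 2 <= a, b <= 5
# yield 15 distinct values (the only collision is 2**4 == 4**2), matching the example
# in Project Euler problem 29.
assert solution(5) == 15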
if __name__ == "__main__":
print("""Number of terms """, solution(int(str(input()).strip())))
| 387 |
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list:
    '''Shuffle ``data`` in place by repeatedly swapping two random positions.'''
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
UpperCAmelCase : str = [0, 1, 2, 3, 4, 5, 6, 7]
UpperCAmelCase : Union[str, Any] = ["python", "says", "hello", "!"]
print("Fisher-Yates Shuffle:")
print("List", integers, strings)
print("FY Shuffle", fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 457 | 0 |
"""simple docstring"""
def sum_digits(num: int) -> int:
    """Return the sum of the decimal digits of ``num``."""
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum


def solution(max_n: int = 100) -> int:
    """Digit sum of the numerator of the ``max_n``-th convergent of e (Project Euler 65)."""
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
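
# Sanity check (added): the tenth convergent of e is 1457/536, and the numerator's
# digit sum is 1 + 4 + 5 + 7 == 17 -- the example from Project Euler problem 65.
assert solution(10) == 17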
if __name__ == "__main__":
print(f'''{solution() = }''')
| 296 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    '''google/vit-base-patch16-224''': '''https://huggingface.co/vit-base-patch16-224/resolve/main/config.json''',
    # See all ViT models at https://huggingface.co/models?filter=vit
}
class ViTConfig(PretrainedConfig):
    model_type = '''vit'''
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1E-12,
        image_size=224,
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        encoder_stride=16,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.encoder_stride = encoder_stride
class ViTOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('''1.11''')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1E-4
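
# Minimal usage sketch (added for illustration; the keyword defaults above define the
# ViT-base geometry):
#     config = ViTConfig()
#     assert config.hidden_size == 768 and config.patch_size == 16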
| 296 | 1 |
'''simple docstring'''
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            '''<unk>''',
            '''<cls>''',
            '''<sep>''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['''vocab_file'''])
        with open(self.vocab_file, '''w''', encoding='''utf-8''') as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = '''UNwant\u00E9d,running'''
        output_text = '''unwanted, running'''
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize('''UNwant\u00E9d,running''')
        self.assertListEqual(tokens, ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer('''UNwant\u00E9d,running''')
            sentence_len = len(inputs['''input_ids''']) - 1
            self.assertListEqual(inputs['''token_type_ids'''], [2] + [0] * sentence_len)

            inputs = tokenizer('''UNwant\u00E9d,running''', '''UNwant\u00E9d,running''')
            self.assertListEqual(inputs['''token_type_ids'''], [2] + [0] * sentence_len + [1] * sentence_len)
| 24 |
'''simple docstring'''
def least_divisible_repunit(divisor: int) -> int:
    '''Return the least k such that the repunit R(k) = 111...1 (k ones) is divisible by ``divisor``.'''
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index


def solution(limit: int = 1_00_00_00) -> int:
    '''Find the least odd divisor (coprime to 10) whose repunit length exceeds ``limit``.'''
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor
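
# Quick check (added for illustration): 111111 == 7 * 15873 and no shorter repunit is
# divisible by 7, so least_divisible_repunit(7) == 6.
assert least_divisible_repunit(7) == 6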
if __name__ == "__main__":
print(F"""{solution() = }""")
| 24 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_nllb import NllbTokenizer
else:
__UpperCamelCase : Optional[int] = None
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : Dict = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
__UpperCamelCase : Optional[int] = {
"""vocab_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/nllb-200-distilled-600M""": (
"""https://huggingface.co/facebook/nllb-200-distilled-600M/resolve/main/tokenizer.json"""
),
},
}
__UpperCamelCase : Optional[Any] = {
"""facebook/nllb-large-en-ro""": 1024,
"""facebook/nllb-200-distilled-600M""": 1024,
}
# fmt: off
__UpperCamelCase : Optional[Any] = ["""ace_Arab""", """ace_Latn""", """acm_Arab""", """acq_Arab""", """aeb_Arab""", """afr_Latn""", """ajp_Arab""", """aka_Latn""", """amh_Ethi""", """apc_Arab""", """arb_Arab""", """ars_Arab""", """ary_Arab""", """arz_Arab""", """asm_Beng""", """ast_Latn""", """awa_Deva""", """ayr_Latn""", """azb_Arab""", """azj_Latn""", """bak_Cyrl""", """bam_Latn""", """ban_Latn""", """bel_Cyrl""", """bem_Latn""", """ben_Beng""", """bho_Deva""", """bjn_Arab""", """bjn_Latn""", """bod_Tibt""", """bos_Latn""", """bug_Latn""", """bul_Cyrl""", """cat_Latn""", """ceb_Latn""", """ces_Latn""", """cjk_Latn""", """ckb_Arab""", """crh_Latn""", """cym_Latn""", """dan_Latn""", """deu_Latn""", """dik_Latn""", """dyu_Latn""", """dzo_Tibt""", """ell_Grek""", """eng_Latn""", """epo_Latn""", """est_Latn""", """eus_Latn""", """ewe_Latn""", """fao_Latn""", """pes_Arab""", """fij_Latn""", """fin_Latn""", """fon_Latn""", """fra_Latn""", """fur_Latn""", """fuv_Latn""", """gla_Latn""", """gle_Latn""", """glg_Latn""", """grn_Latn""", """guj_Gujr""", """hat_Latn""", """hau_Latn""", """heb_Hebr""", """hin_Deva""", """hne_Deva""", """hrv_Latn""", """hun_Latn""", """hye_Armn""", """ibo_Latn""", """ilo_Latn""", """ind_Latn""", """isl_Latn""", """ita_Latn""", """jav_Latn""", """jpn_Jpan""", """kab_Latn""", """kac_Latn""", """kam_Latn""", """kan_Knda""", """kas_Arab""", """kas_Deva""", """kat_Geor""", """knc_Arab""", """knc_Latn""", """kaz_Cyrl""", """kbp_Latn""", """kea_Latn""", """khm_Khmr""", """kik_Latn""", """kin_Latn""", """kir_Cyrl""", """kmb_Latn""", """kon_Latn""", """kor_Hang""", """kmr_Latn""", """lao_Laoo""", """lvs_Latn""", """lij_Latn""", """lim_Latn""", """lin_Latn""", """lit_Latn""", """lmo_Latn""", """ltg_Latn""", """ltz_Latn""", """lua_Latn""", """lug_Latn""", """luo_Latn""", """lus_Latn""", """mag_Deva""", """mai_Deva""", """mal_Mlym""", """mar_Deva""", """min_Latn""", """mkd_Cyrl""", """plt_Latn""", """mlt_Latn""", """mni_Beng""", """khk_Cyrl""", """mos_Latn""", """mri_Latn""", """zsm_Latn""", """mya_Mymr""", """nld_Latn""", """nno_Latn""", """nob_Latn""", """npi_Deva""", """nso_Latn""", """nus_Latn""", """nya_Latn""", """oci_Latn""", """gaz_Latn""", """ory_Orya""", """pag_Latn""", """pan_Guru""", """pap_Latn""", """pol_Latn""", """por_Latn""", """prs_Arab""", """pbt_Arab""", """quy_Latn""", """ron_Latn""", """run_Latn""", """rus_Cyrl""", """sag_Latn""", """san_Deva""", """sat_Beng""", """scn_Latn""", """shn_Mymr""", """sin_Sinh""", """slk_Latn""", """slv_Latn""", """smo_Latn""", """sna_Latn""", """snd_Arab""", """som_Latn""", """sot_Latn""", """spa_Latn""", """als_Latn""", """srd_Latn""", """srp_Cyrl""", """ssw_Latn""", """sun_Latn""", """swe_Latn""", """swh_Latn""", """szl_Latn""", """tam_Taml""", """tat_Cyrl""", """tel_Telu""", """tgk_Cyrl""", """tgl_Latn""", """tha_Thai""", """tir_Ethi""", """taq_Latn""", """taq_Tfng""", """tpi_Latn""", """tsn_Latn""", """tso_Latn""", """tuk_Latn""", """tum_Latn""", """tur_Latn""", """twi_Latn""", """tzm_Tfng""", """uig_Arab""", """ukr_Cyrl""", """umb_Latn""", """urd_Arab""", """uzn_Latn""", """vec_Latn""", """vie_Latn""", """war_Latn""", """wol_Latn""", """xho_Latn""", """ydd_Hebr""", """yor_Latn""", """yue_Hant""", """zho_Hans""", """zho_Hant""", """zul_Latn"""]
class __SCREAMING_SNAKE_CASE( a_ ):
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = ["input_ids", "attention_mask"]
_UpperCAmelCase = NllbTokenizer
_UpperCAmelCase = []
_UpperCAmelCase = []
def __init__( self: List[Any] , UpperCamelCase: Dict=None , UpperCamelCase: List[Any]=None , UpperCamelCase: str="<s>" , UpperCamelCase: int="</s>" , UpperCamelCase: List[str]="</s>" , UpperCamelCase: Optional[int]="<s>" , UpperCamelCase: Union[str, Any]="<unk>" , UpperCamelCase: int="<pad>" , UpperCamelCase: List[str]="<mask>" , UpperCamelCase: Tuple=None , UpperCamelCase: Union[str, Any]=None , UpperCamelCase: Optional[Any]=None , UpperCamelCase: List[str]=False , **UpperCamelCase: Optional[int] , ) -> Union[str, Any]:
# Mask token behave like a normal word, i.e. include the space before it
snake_case__ = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else mask_token
snake_case__ = legacy_behaviour
super().__init__(
vocab_file=UpperCamelCase , tokenizer_file=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , sep_token=UpperCamelCase , cls_token=UpperCamelCase , unk_token=UpperCamelCase , pad_token=UpperCamelCase , mask_token=UpperCamelCase , src_lang=UpperCamelCase , tgt_lang=UpperCamelCase , additional_special_tokens=UpperCamelCase , legacy_behaviour=UpperCamelCase , **UpperCamelCase , )
snake_case__ = vocab_file
snake_case__ = False if not self.vocab_file else True
snake_case__ = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
snake_case__ = {
lang_code: self.convert_tokens_to_ids(UpperCamelCase ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
snake_case__ = src_lang if src_lang is not None else 'eng_Latn'
snake_case__ = self.convert_tokens_to_ids(self._src_lang )
snake_case__ = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def lowerCAmelCase_ ( self: int ) -> str:
return self._src_lang
@src_lang.setter
def lowerCAmelCase_ ( self: Dict , UpperCamelCase: str ) -> None:
snake_case__ = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: List[int] , UpperCamelCase: Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def lowerCAmelCase_ ( self: str , UpperCamelCase: List[int] , UpperCamelCase: Optional[List[int]] = None ) -> List[int]:
snake_case__ = [self.sep_token_id]
snake_case__ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCAmelCase_ ( self: Union[str, Any] , UpperCamelCase: List[Any] , UpperCamelCase: str , UpperCamelCase: Optional[str] , UpperCamelCase: Optional[str] , **UpperCamelCase: Dict ) -> Tuple:
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
snake_case__ = src_lang
snake_case__ = self(UpperCamelCase , add_special_tokens=UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase )
snake_case__ = self.convert_tokens_to_ids(UpperCamelCase )
snake_case__ = tgt_lang_id
return inputs
def lowerCAmelCase_ ( self: Any , UpperCamelCase: List[str] , UpperCamelCase: str = "eng_Latn" , UpperCamelCase: Optional[List[str]] = None , UpperCamelCase: str = "fra_Latn" , **UpperCamelCase: Dict , ) -> BatchEncoding:
snake_case__ = src_lang
snake_case__ = tgt_lang
return super().prepare_seqaseq_batch(UpperCamelCase , UpperCamelCase , **UpperCamelCase )
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[int]:
return self.set_src_lang_special_tokens(self.src_lang )
def lowerCAmelCase_ ( self: List[Any] ) -> int:
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def lowerCAmelCase_ ( self: List[str] , UpperCamelCase: List[str] ) -> None:
snake_case__ = self.convert_tokens_to_ids(UpperCamelCase )
if self.legacy_behaviour:
snake_case__ = []
snake_case__ = [self.eos_token_id, self.cur_lang_code]
else:
snake_case__ = [self.cur_lang_code]
snake_case__ = [self.eos_token_id]
snake_case__ = self.convert_ids_to_tokens(self.prefix_tokens )
snake_case__ = self.convert_ids_to_tokens(self.suffix_tokens )
snake_case__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowerCAmelCase_ ( self: Optional[Any] , UpperCamelCase: str ) -> None:
snake_case__ = self.convert_tokens_to_ids(UpperCamelCase )
if self.legacy_behaviour:
snake_case__ = []
snake_case__ = [self.eos_token_id, self.cur_lang_code]
else:
snake_case__ = [self.cur_lang_code]
snake_case__ = [self.eos_token_id]
snake_case__ = self.convert_ids_to_tokens(self.prefix_tokens )
snake_case__ = self.convert_ids_to_tokens(self.suffix_tokens )
snake_case__ = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def lowerCAmelCase_ ( self: List[Any] , UpperCamelCase: str , UpperCamelCase: Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(UpperCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory.''' )
return
snake_case__ = os.path.join(
UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ):
copyfile(self.vocab_file , UpperCamelCase )
return (out_vocab_file,)
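# Hypothetical usage sketch (checkpoint name assumed, not taken from this file):
# the fast NLLB tokenizer defined above prefixes inputs with the source language
# code and appends </s> (the order flips when legacy_behaviour=True).
#
#     tokenizer = NllbTokenizerFast.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     batch = tokenizer("Hello world", return_tensors="pt")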
| 372 |
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003


def rabin_karp(pattern: str, text: str) -> bool:
    """Return True if `pattern` occurs in `text`, using Rabin-Karp rolling hashes."""
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
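# Illustrative sketch (not part of the original module): the rolling-hash update
# above derives the window hash at position i + 1 from the hash at position i in
# O(1), instead of rehashing the whole window. For a 3-character window, the hash
# of "bcd" follows from the hash of "abc" by dropping 'a' and appending 'd'.
def _demo_rolling_hash() -> None:
    window_a, window_b = "abc", "bcd"
    hash_a = hash_b = 0
    power = 1
    for i in range(3):
        hash_a = (ord(window_a[i]) + hash_a * alphabet_size) % modulus
        hash_b = (ord(window_b[i]) + hash_b * alphabet_size) % modulus
        if i < 2:
            power = (power * alphabet_size) % modulus
    # Slide "abc" -> "bcd": drop 'a' (weighted by alphabet_size**2), append 'd'.
    rolled = ((hash_a - ord("a") * power) * alphabet_size + ord("d")) % modulus
    assert rolled == hash_b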
def test_rabin_karp() -> None:
    """Exercise rabin_karp on a few positive and negative cases."""
    # Test 1)
    pattern = "abc1abc12"
    text_1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text_2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text_1) and not rabin_karp(pattern, text_2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)
    print("Success.")


if __name__ == "__main__":
    test_rabin_karp()
| 372 | 1 |
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    """Build a (k_size x k_size) Gaussian kernel sampled from the 2D Gaussian PDF."""
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g
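# Illustrative sanity check (not part of the original module): the sampled kernel
# peaks at its center and is symmetric. Note this implementation does not
# renormalize the kernel, so entries follow the PDF rather than summing to 1.
def _check_kernel() -> None:
    kernel = gen_gaussian_kernel(3, 1.0)
    assert kernel[1, 1] == kernel.max()
    assert abs(kernel[0, 1] - kernel[2, 1]) < 1e-12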
def gaussian_filter(image, k_size, sigma):
    """Apply a Gaussian blur via im2col and a single matrix product."""
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
| 193 |
from typing import Dict

from .base import GenericTensor, Pipeline


class FeatureExtractionPipeline(Pipeline):
    """Feature extraction pipeline: returns the base model's hidden states for the input text."""

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        # [0] is the first available tensor, logits or last_hidden_state.
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
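# Hypothetical usage sketch (model name assumed, not taken from this file):
#
#     from transformers import pipeline
#
#     extractor = pipeline("feature-extraction", model="bert-base-uncased")
#     features = extractor("This is a test", return_tensors=True)
#     # -> tensor of shape (1, sequence_length, hidden_size)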
| 193 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def lowercase_ ( self :Optional[int] ) -> Union[str, Any]:
lowerCamelCase__ : Any = TFCamembertModel.from_pretrained('''jplu/tf-camembert-base''' )
lowerCamelCase__ : Tuple = tf.convert_to_tensor(
[[5, 1_21, 11, 6_60, 16, 7_30, 2_55_43, 1_10, 83, 6]] ,dtype=tf.intaa ,) # J'aime le camembert !"
lowerCamelCase__ : Tuple = model(__UpperCAmelCase )['''last_hidden_state''']
lowerCamelCase__ : str = tf.TensorShape((1, 10, 7_68) )
self.assertEqual(output.shape ,__UpperCAmelCase )
# compare the actual values for a slice.
lowerCamelCase__ : List[Any] = tf.convert_to_tensor(
[[[-0.0_254, 0.0_235, 0.1_027], [0.0_606, -0.1_811, -0.0_418], [-0.1_561, -0.1_127, 0.2_687]]] ,dtype=tf.floataa ,)
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1E-4 ) )
| 707 | """simple docstring"""
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase ):
def lowercase_ ( self :int ) -> List[str]:
"""simple docstring"""
lowerCamelCase__ : Any = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__UpperCAmelCase ,'''hidden_sizes''' ) )
self.parent.assertTrue(hasattr(__UpperCAmelCase ,'''num_attention_heads''' ) )
self.parent.assertTrue(hasattr(__UpperCAmelCase ,'''num_encoder_blocks''' ) )
class __SCREAMING_SNAKE_CASE :
def __init__( self :Union[str, Any] ,__UpperCAmelCase :str ,__UpperCAmelCase :Optional[int]=13 ,__UpperCAmelCase :Any=64 ,__UpperCAmelCase :List[Any]=3 ,__UpperCAmelCase :str=4 ,__UpperCAmelCase :Any=[2, 2, 2, 2] ,__UpperCAmelCase :List[Any]=[8, 4, 2, 1] ,__UpperCAmelCase :Dict=[16, 32, 64, 1_28] ,__UpperCAmelCase :Any=[1, 4, 8, 16] ,__UpperCAmelCase :int=[1, 2, 4, 8] ,__UpperCAmelCase :Union[str, Any]=True ,__UpperCAmelCase :Dict=True ,__UpperCAmelCase :Optional[int]="gelu" ,__UpperCAmelCase :Tuple=0.1 ,__UpperCAmelCase :List[Any]=0.1 ,__UpperCAmelCase :List[Any]=0.02 ,__UpperCAmelCase :str=3 ,__UpperCAmelCase :Tuple=None ,) -> int:
"""simple docstring"""
lowerCamelCase__ : List[Any] = parent
lowerCamelCase__ : List[Any] = batch_size
lowerCamelCase__ : Any = image_size
lowerCamelCase__ : Tuple = num_channels
lowerCamelCase__ : Union[str, Any] = num_encoder_blocks
lowerCamelCase__ : Union[str, Any] = sr_ratios
lowerCamelCase__ : int = depths
lowerCamelCase__ : Optional[Any] = hidden_sizes
lowerCamelCase__ : List[Any] = downsampling_rates
lowerCamelCase__ : Dict = num_attention_heads
lowerCamelCase__ : Tuple = is_training
lowerCamelCase__ : Union[str, Any] = use_labels
lowerCamelCase__ : Optional[Any] = hidden_act
lowerCamelCase__ : Optional[Any] = hidden_dropout_prob
lowerCamelCase__ : str = attention_probs_dropout_prob
lowerCamelCase__ : Union[str, Any] = initializer_range
lowerCamelCase__ : Optional[Any] = num_labels
lowerCamelCase__ : Any = scope
def lowercase_ ( self :Dict ) -> str:
"""simple docstring"""
lowerCamelCase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCamelCase__ : Optional[Any] = None
if self.use_labels:
lowerCamelCase__ : str = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
lowerCamelCase__ : Any = self.get_config()
return config, pixel_values, labels
def lowercase_ ( self :Tuple ) -> Any:
"""simple docstring"""
return SegformerConfig(
image_size=self.image_size ,num_channels=self.num_channels ,num_encoder_blocks=self.num_encoder_blocks ,depths=self.depths ,hidden_sizes=self.hidden_sizes ,num_attention_heads=self.num_attention_heads ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,)
def lowercase_ ( self :Any ,__UpperCAmelCase :Tuple ,__UpperCAmelCase :Union[str, Any] ,__UpperCAmelCase :Any ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__ : Optional[int] = SegformerModel(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCamelCase__ : str = model(__UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def lowercase_ ( self :Union[str, Any] ,__UpperCAmelCase :str ,__UpperCAmelCase :Tuple ,__UpperCAmelCase :Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__ : Tuple = self.num_labels
lowerCamelCase__ : Dict = SegformerForSemanticSegmentation(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCamelCase__ : Tuple = model(__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
lowerCamelCase__ : List[Any] = model(__UpperCAmelCase ,labels=__UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss ,0.0 )
def lowercase_ ( self :str ,__UpperCAmelCase :Dict ,__UpperCAmelCase :Union[str, Any] ,__UpperCAmelCase :Tuple ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__ : Dict = 1
lowerCamelCase__ : Union[str, Any] = SegformerForSemanticSegmentation(config=__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
lowerCamelCase__ : str = torch.randint(0 ,1 ,(self.batch_size, self.image_size, self.image_size) ).to(__UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = model(__UpperCAmelCase ,labels=__UpperCAmelCase )
self.parent.assertGreater(result.loss ,0.0 )
def lowercase_ ( self :Union[str, Any] ) -> int:
"""simple docstring"""
lowerCamelCase__ : List[str] = self.prepare_config_and_inputs()
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[str] = config_and_inputs
lowerCamelCase__ : int = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
UpperCAmelCase = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
UpperCAmelCase = (
{
'''feature-extraction''': SegformerModel,
'''image-classification''': SegformerForImageClassification,
'''image-segmentation''': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = False
UpperCAmelCase = False
def lowercase_ ( self :int ) -> Dict:
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = SegformerModelTester(self )
lowerCamelCase__ : int = SegformerConfigTester(self ,config_class=__UpperCAmelCase )
def lowercase_ ( self :Tuple ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowercase_ ( self :Dict ) -> Tuple:
"""simple docstring"""
lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCAmelCase )
def lowercase_ ( self :Optional[int] ) -> str:
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*__UpperCAmelCase )
def lowercase_ ( self :Any ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*__UpperCAmelCase )
@unittest.skip('''SegFormer does not use inputs_embeds''' )
def lowercase_ ( self :int ) -> Union[str, Any]:
"""simple docstring"""
pass
@unittest.skip('''SegFormer does not have get_input_embeddings method and get_output_embeddings methods''' )
def lowercase_ ( self :Optional[int] ) -> List[str]:
"""simple docstring"""
pass
def lowercase_ ( self :Any ) -> str:
"""simple docstring"""
lowerCamelCase__ , lowerCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : List[str] = model_class(__UpperCAmelCase )
lowerCamelCase__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCamelCase__ : Union[str, Any] = [*signature.parameters.keys()]
lowerCamelCase__ : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,__UpperCAmelCase )
def lowercase_ ( self :List[str] ) -> int:
"""simple docstring"""
lowerCamelCase__ , lowerCamelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : int = True
for model_class in self.all_model_classes:
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : List[str] = False
lowerCamelCase__ : Tuple = True
lowerCamelCase__ : List[Any] = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Union[str, Any] = model(**self._prepare_for_class(__UpperCAmelCase ,__UpperCAmelCase ) )
lowerCamelCase__ : Union[str, Any] = outputs.attentions
lowerCamelCase__ : int = sum(self.model_tester.depths )
self.assertEqual(len(__UpperCAmelCase ) ,__UpperCAmelCase )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCamelCase__ : List[Any] = True
lowerCamelCase__ : List[Any] = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Any = model(**self._prepare_for_class(__UpperCAmelCase ,__UpperCAmelCase ) )
lowerCamelCase__ : Dict = outputs.attentions
self.assertEqual(len(__UpperCAmelCase ) ,__UpperCAmelCase )
# verify the first attentions (first block, first layer)
lowerCamelCase__ : Union[str, Any] = (self.model_tester.image_size // 4) ** 2
lowerCamelCase__ : List[str] = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] ,)
# verify the last attentions (last block, last layer)
lowerCamelCase__ : Dict = (self.model_tester.image_size // 32) ** 2
lowerCamelCase__ : Optional[Any] = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) ,[self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] ,)
lowerCamelCase__ : Union[str, Any] = len(__UpperCAmelCase )
# Check attention is always last and order is fine
lowerCamelCase__ : Optional[int] = True
lowerCamelCase__ : Any = True
lowerCamelCase__ : Any = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
lowerCamelCase__ : Any = model(**self._prepare_for_class(__UpperCAmelCase ,__UpperCAmelCase ) )
self.assertEqual(out_len + 1 ,len(__UpperCAmelCase ) )
lowerCamelCase__ : Any = outputs.attentions
self.assertEqual(len(__UpperCAmelCase ) ,__UpperCAmelCase )
# verify the first attentions (first block, first layer)
lowerCamelCase__ : Tuple = (self.model_tester.image_size // 4) ** 2
lowerCamelCase__ : str = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] ,)
def lowercase_ ( self :int ) -> Optional[Any]:
"""simple docstring"""
def check_hidden_states_output(__UpperCAmelCase :Union[str, Any] ,__UpperCAmelCase :Any ,__UpperCAmelCase :Optional[Any] ):
lowerCamelCase__ : List[str] = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.eval()
with torch.no_grad():
lowerCamelCase__ : str = model(**self._prepare_for_class(__UpperCAmelCase ,__UpperCAmelCase ) )
lowerCamelCase__ : Any = outputs.hidden_states
lowerCamelCase__ : Optional[int] = self.model_tester.num_encoder_blocks
self.assertEqual(len(__UpperCAmelCase ) ,__UpperCAmelCase )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) ,[
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] ,)
lowerCamelCase__ , lowerCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCamelCase__ : Any = True
check_hidden_states_output(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCamelCase__ : Optional[Any] = True
check_hidden_states_output(__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase )
def lowercase_ ( self :Union[str, Any] ) -> List[str]:
"""simple docstring"""
if not self.model_tester.is_training:
return
lowerCamelCase__ , lowerCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCamelCase__ : Union[str, Any] = True
for model_class in self.all_model_classes:
if model_class in get_values(__UpperCAmelCase ):
continue
lowerCamelCase__ : Union[str, Any] = model_class(__UpperCAmelCase )
model.to(__UpperCAmelCase )
model.train()
lowerCamelCase__ : str = self._prepare_for_class(__UpperCAmelCase ,__UpperCAmelCase ,return_labels=__UpperCAmelCase )
lowerCamelCase__ : List[str] = model(**__UpperCAmelCase ).loss
loss.backward()
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase_ ( self :Optional[Any] ) -> Optional[int]:
"""simple docstring"""
pass
@slow
def lowercase_ ( self :Tuple ) -> Tuple:
"""simple docstring"""
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : List[Any] = SegformerModel.from_pretrained(__UpperCAmelCase )
self.assertIsNotNone(__UpperCAmelCase )
def __a ( ):
"""simple docstring"""
lowerCamelCase__ : List[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def lowercase_ ( self :Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] = SegformerImageProcessor(
image_scale=(5_12, 5_12) ,keep_ratio=__UpperCAmelCase ,align=__UpperCAmelCase ,do_random_crop=__UpperCAmelCase )
lowerCamelCase__ : List[Any] = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
__UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = prepare_img()
lowerCamelCase__ : Tuple = image_processor(images=__UpperCAmelCase ,return_tensors='''pt''' )
lowerCamelCase__ : Optional[Any] = encoded_inputs.pixel_values.to(__UpperCAmelCase )
with torch.no_grad():
lowerCamelCase__ : Optional[Any] = model(__UpperCAmelCase )
lowerCamelCase__ : Optional[Any] = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape ,__UpperCAmelCase )
lowerCamelCase__ : Dict = torch.tensor(
[
[[-4.6_310, -5.5_232, -6.2_356], [-5.1_921, -6.1_444, -6.5_996], [-5.4_424, -6.2_790, -6.7_574]],
[[-12.1_391, -13.3_122, -13.9_554], [-12.8_732, -13.9_352, -14.3_563], [-12.9_438, -13.8_226, -14.2_513]],
[[-12.5_134, -13.4_686, -14.4_915], [-12.8_669, -14.4_343, -14.7_758], [-13.2_523, -14.5_819, -15.0_694]],
] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] ,__UpperCAmelCase ,atol=1E-4 ) )
@slow
def lowercase_ ( self :List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase__ : str = SegformerImageProcessor(
image_scale=(5_12, 5_12) ,keep_ratio=__UpperCAmelCase ,align=__UpperCAmelCase ,do_random_crop=__UpperCAmelCase )
lowerCamelCase__ : Dict = SegformerForSemanticSegmentation.from_pretrained(
'''nvidia/segformer-b1-finetuned-cityscapes-1024-1024''' ).to(__UpperCAmelCase )
lowerCamelCase__ : int = prepare_img()
lowerCamelCase__ : Any = image_processor(images=__UpperCAmelCase ,return_tensors='''pt''' )
lowerCamelCase__ : Dict = encoded_inputs.pixel_values.to(__UpperCAmelCase )
with torch.no_grad():
lowerCamelCase__ : Optional[int] = model(__UpperCAmelCase )
lowerCamelCase__ : Dict = torch.Size((1, model.config.num_labels, 1_28, 1_28) )
self.assertEqual(outputs.logits.shape ,__UpperCAmelCase )
lowerCamelCase__ : List[str] = torch.tensor(
[
[[-13.5_748, -13.9_111, -12.6_500], [-14.3_500, -15.3_683, -14.2_328], [-14.7_532, -16.0_424, -15.6_087]],
[[-17.1_651, -15.8_725, -12.9_653], [-17.2_580, -17.3_718, -14.8_223], [-16.6_058, -16.8_783, -16.7_452]],
[[-3.6_456, -3.0_209, -1.4_203], [-3.0_797, -3.1_959, -2.0_000], [-1.8_757, -1.9_217, -1.6_997]],
] ).to(__UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] ,__UpperCAmelCase ,atol=1E-1 ) )
@slow
def lowercase_ ( self :int ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = SegformerImageProcessor(
image_scale=(5_12, 5_12) ,keep_ratio=__UpperCAmelCase ,align=__UpperCAmelCase ,do_random_crop=__UpperCAmelCase )
lowerCamelCase__ : Tuple = SegformerForSemanticSegmentation.from_pretrained('''nvidia/segformer-b0-finetuned-ade-512-512''' ).to(
__UpperCAmelCase )
lowerCamelCase__ : List[str] = prepare_img()
lowerCamelCase__ : Optional[Any] = image_processor(images=__UpperCAmelCase ,return_tensors='''pt''' )
lowerCamelCase__ : int = encoded_inputs.pixel_values.to(__UpperCAmelCase )
with torch.no_grad():
lowerCamelCase__ : int = model(__UpperCAmelCase )
lowerCamelCase__ : Dict = outputs.logits.detach().cpu()
lowerCamelCase__ : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase ,target_sizes=[(5_00, 3_00)] )
lowerCamelCase__ : Optional[int] = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape ,__UpperCAmelCase )
lowerCamelCase__ : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=__UpperCAmelCase )
lowerCamelCase__ : Optional[int] = torch.Size((1_28, 1_28) )
self.assertEqual(segmentation[0].shape ,__UpperCAmelCase )
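# Hypothetical usage sketch outside the test suite (checkpoint name taken from the
# tests above, `image` is a PIL image): SegFormer predicts logits at 1/4 of the
# input resolution, which the processor can upsample back to a target size.
#
#     from transformers import SegformerImageProcessor, SegformerForSemanticSegmentation
#
#     processor = SegformerImageProcessor.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
#     model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512")
#     outputs = model(**processor(images=image, return_tensors="pt"))
#     seg_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]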
| 121 | 0 |
import importlib.metadata
from typing import Union

from packaging.version import Version, parse

from .constants import STR_OPERATION_TO_FUNC


torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare a library version (or a `Version` object) against a requirement string."""
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f"`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys())}, received {operation}")
    operation = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation(library_or_version, parse(requirement_version))
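# Hypothetical usage sketch (assumes STR_OPERATION_TO_FUNC maps strings such as
# ">=" to comparison functions like operator.ge):
#
#     if compare_versions("numpy", ">=", "1.20.0"):
#         ...  # take the code path that needs a newer numpy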
def is_torch_version(operation: str, version: str):
    """Check the installed torch version against `version` using `operation`."""
    return compare_versions(torch_version, operation, version) | 360 |
"""simple docstring"""
import os
import sys
UpperCamelCase : Optional[int] = os.path.join(os.path.dirname(__file__), "src")
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
UpperCamelCase : Dict = [
"torch",
"numpy",
"tokenizers",
"filelock",
"requests",
"tqdm",
"regex",
"sentencepiece",
"sacremoses",
"importlib_metadata",
"huggingface_hub",
]
@add_start_docstrings(AutoConfig.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> Dict:
"""simple docstring"""
return AutoConfig.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
return AutoTokenizer.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModel.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> str:
"""simple docstring"""
return AutoModel.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
"""simple docstring"""
return AutoModelForCausalLM.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> Optional[Any]:
"""simple docstring"""
return AutoModelForMaskedLM.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
"""simple docstring"""
return AutoModelForSequenceClassification.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __snake_case ( *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
"""simple docstring"""
return AutoModelForQuestionAnswering.from_pretrained(*UpperCamelCase__ , **UpperCamelCase__ )
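# Hypothetical usage sketch via torch.hub (entrypoint names as defined above;
# the repository path and checkpoint name are assumptions):
#
#     import torch
#
#     tok = torch.hub.load("huggingface/transformers", "tokenizer", "bert-base-uncased")
#     mdl = torch.hub.load("huggingface/transformers", "model", "bert-base-uncased")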
| 690 | 0 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor


def data_handling(data: dict) -> tuple:
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray, test_features: np.ndarray) -> np.ndarray:
    xgb = XGBRegressor(verbosity=0, random_state=42)
    xgb.fit(features, target)
    # Predict target for test data
    predictions = xgb.predict(test_features)
    predictions = predictions.reshape(len(predictions), 1)
    return predictions


def main() -> None:
    # Load California house price dataset
    california = fetch_california_housing()
    data, target = data_handling(california)
    x_train, x_test, y_train, y_test = train_test_split(
        data, target, test_size=0.25, random_state=1
    )
    predictions = xgboost(x_train, y_train, x_test)
    # Error printing
    print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
    print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
| 710 |
'''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
A : Tuple = ['bert-base-uncased', 'bert-base-cased']
A : str = 'hf-internal-testing/tiny-bert-tf-only'
if is_tf_available():
class lowerCamelCase ( tf.keras.Model ):
def __init__( self : Union[str, Any] , __snake_case : List[Any] ):
'''simple docstring'''
super().__init__()
_snake_case: List[Any] = tokenizer
_snake_case: str = AutoConfig.from_pretrained(__snake_case )
_snake_case: List[Any] = TFAutoModel.from_config(__snake_case )
def SCREAMING_SNAKE_CASE_ ( self : str , __snake_case : List[str] ):
'''simple docstring'''
_snake_case: Optional[int] = self.tokenizer(__snake_case )
_snake_case: Tuple = self.bert(**__snake_case )
return out["pooler_output"]
@require_tf
@require_tensorflow_text
class lowerCamelCase ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
super().setUp()
_snake_case: List[Any] = [
BertTokenizer.from_pretrained(__snake_case ) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
] # repeat for when fast_bert_tokenizer=false
_snake_case: List[Any] = [TFBertTokenizer.from_pretrained(__snake_case ) for checkpoint in TOKENIZER_CHECKPOINTS] + [
TFBertTokenizer.from_pretrained(__snake_case , use_fast_bert_tokenizer=__snake_case )
for checkpoint in TOKENIZER_CHECKPOINTS
]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
_snake_case: int = [
'This is a straightforward English test sentence.',
'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.',
'Now we\'re going to add some Chinese: 一 二 三 一二三',
'And some much more rare Chinese: 齉 堃 齉堃',
'Je vais aussi écrire en français pour tester les accents',
'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ',
]
_snake_case: str = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in (self.test_sentences, self.paired_sentences):
_snake_case: List[Any] = tokenizer(__snake_case , return_tensors='tf' , padding='longest' )
_snake_case: Optional[Any] = tf_tokenizer(__snake_case )
for key in python_outputs.keys():
self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape ) )
self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key] , tf.intaa ) == tf_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : int ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case: str = tf_tokenizer(self.paired_sentences )
_snake_case: Any = tf_tokenizer(
text=[sentence[0] for sentence in self.paired_sentences] , text_pair=[sentence[1] for sentence in self.paired_sentences] , )
for key in merged_outputs.keys():
self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key] , tf.intaa ) == separated_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case: List[str] = tf.function(__snake_case )
for test_inputs in (self.test_sentences, self.paired_sentences):
_snake_case: Union[str, Any] = tf.constant(__snake_case )
_snake_case: Any = compiled_tokenizer(__snake_case )
_snake_case: Union[str, Any] = tf_tokenizer(__snake_case )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
for tf_tokenizer in self.tf_tokenizers:
_snake_case: Optional[Any] = ModelToSave(tokenizer=__snake_case )
_snake_case: Tuple = tf.convert_to_tensor(self.test_sentences )
_snake_case: List[str] = model(__snake_case ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
_snake_case: List[str] = Path(__snake_case ) / 'saved.model'
model.save(__snake_case )
_snake_case: int = tf.keras.models.load_model(__snake_case )
_snake_case: int = loaded_model(__snake_case )
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output ) ) , 1e-5 )
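# Hypothetical usage sketch (checkpoint name assumed): TFBertTokenizer tokenizes
# inside the TensorFlow graph, so preprocessing can ship with an exported
# SavedModel, as the model-saving test above exercises.
#
#     tf_tokenizer = TFBertTokenizer.from_pretrained("bert-base-uncased")
#     batch = tf_tokenizer(tf.constant(["This is a test"]))
#     # batch is a dict of tf tensors: input_ids, token_type_ids, attention_mask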
| 273 | 0 |
import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case : List[Any] = get_tests_dir("""fixtures/spiece.model""")
@require_sentencepiece
@require_tokenizers
class A__(a_, unittest.TestCase ):
"""simple docstring"""
_A : Tuple = DebertaVaTokenizer
_A : Dict = DebertaVaTokenizerFast
_A : Optional[int] = True
_A : Optional[Any] = True
def UpperCamelCase__ ( self ) -> Optional[int]:
super().setUp()
# We have a SentencePiece fixture for testing
a_ : str = DebertaVaTokenizer(_lowercase , unk_token="""<unk>""" )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase__ ( self , _lowercase ) -> Union[str, Any]:
a_ : Optional[Any] = """this is a test"""
a_ : str = """this is a test"""
return input_text, output_text
def UpperCamelCase__ ( self ) -> str:
a_ : Dict = """<pad>"""
a_ : Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_lowercase ) , _lowercase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_lowercase ) , _lowercase )
def UpperCamelCase__ ( self ) -> List[str]:
a_ : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<pad>""" )
self.assertEqual(vocab_keys[1] , """<unk>""" )
self.assertEqual(vocab_keys[-1] , """[PAD]""" )
self.assertEqual(len(_lowercase ) , 30_001 )
def UpperCamelCase__ ( self ) -> str:
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def UpperCamelCase__ ( self ) -> Optional[Any]:
# fmt: off
a_ : Tuple = """ \tHeLLo!how \n Are yoU? """
a_ : Tuple = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""]
# fmt: on
a_ : Optional[Any] = DebertaVaTokenizer(_lowercase , do_lower_case=_lowercase )
a_ : Tuple = tokenizer.convert_ids_to_tokens(tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
self.assertListEqual(_lowercase , _lowercase )
a_ : Any = DebertaVaTokenizerFast(_lowercase , do_lower_case=_lowercase )
a_ : str = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
self.assertListEqual(_lowercase , _lowercase )
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def UpperCamelCase__ ( self ) -> Tuple:
pass
@unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" )
def UpperCamelCase__ ( self ) -> Optional[Any]:
pass
def UpperCamelCase__ ( self ) -> Optional[int]:
# fmt: off
a_ : Dict = """I was born in 92000, and this is falsé."""
a_ : Union[str, Any] = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
a_ : Dict = DebertaVaTokenizer(_lowercase , split_by_punct=_lowercase )
a_ : Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
self.assertListEqual(_lowercase , _lowercase )
a_ : str = DebertaVaTokenizerFast(_lowercase , split_by_punct=_lowercase )
a_ : List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
self.assertListEqual(_lowercase , _lowercase )
def UpperCamelCase__ ( self ) -> Any:
# fmt: off
a_ : Union[str, Any] = """I was born in 92000, and this is falsé."""
a_ : Optional[int] = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
a_ : Any = DebertaVaTokenizer(_lowercase , do_lower_case=_lowercase , split_by_punct=_lowercase )
a_ : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
self.assertListEqual(_lowercase , _lowercase )
a_ : str = DebertaVaTokenizerFast(_lowercase , do_lower_case=_lowercase , split_by_punct=_lowercase )
a_ : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
self.assertListEqual(_lowercase , _lowercase )
def UpperCamelCase__ ( self ) -> str:
# fmt: off
a_ : List[str] = """I was born in 92000, and this is falsé."""
a_ : Any = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
a_ : int = DebertaVaTokenizer(_lowercase , do_lower_case=_lowercase , split_by_punct=_lowercase )
a_ : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
self.assertListEqual(_lowercase , _lowercase )
a_ : Tuple = DebertaVaTokenizerFast(_lowercase , do_lower_case=_lowercase , split_by_punct=_lowercase )
a_ : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
self.assertListEqual(_lowercase , _lowercase )
def UpperCamelCase__ ( self ) -> Optional[Any]:
# fmt: off
a_ : Tuple = """I was born in 92000, and this is falsé."""
a_ : Any = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ]
# fmt: on
a_ : List[Any] = DebertaVaTokenizer(_lowercase , do_lower_case=_lowercase , split_by_punct=_lowercase )
a_ : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
self.assertListEqual(_lowercase , _lowercase )
a_ : Union[str, Any] = DebertaVaTokenizerFast(_lowercase , do_lower_case=_lowercase , split_by_punct=_lowercase )
a_ : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
self.assertListEqual(_lowercase , _lowercase )
def UpperCamelCase__ ( self ) -> Any:
# fmt: off
a_ : List[Any] = """ \tHeLLo!how \n Are yoU? """
a_ : Tuple = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""]
# fmt: on
a_ : Any = DebertaVaTokenizer(_lowercase , do_lower_case=_lowercase , split_by_punct=_lowercase )
a_ : Optional[int] = tokenizer.convert_ids_to_tokens(tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
self.assertListEqual(_lowercase , _lowercase )
a_ : Dict = DebertaVaTokenizerFast(_lowercase , do_lower_case=_lowercase , split_by_punct=_lowercase )
a_ : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
self.assertListEqual(_lowercase , _lowercase )
def UpperCamelCase__ ( self ) -> Optional[int]:
a_ : Tuple = self.get_tokenizer()
a_ : Optional[int] = self.get_rust_tokenizer()
a_ : str = """I was born in 92000, and this is falsé."""
a_ : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
a_ : List[str] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
self.assertListEqual(_lowercase , _lowercase )
a_ : Tuple = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
a_ : str = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
a_ : Optional[Any] = self.get_rust_tokenizer()
a_ : Optional[Any] = tokenizer.encode(_lowercase )
a_ : Optional[int] = rust_tokenizer.encode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
def UpperCamelCase__ ( self ) -> int:
a_ : Union[str, Any] = """This is a test"""
a_ : List[Any] = [13, 1, 4_398, 25, 21, 1_289]
a_ : str = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""]
a_ : Union[str, Any] = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""]
a_ : int = DebertaVaTokenizer(_lowercase , keep_accents=_lowercase )
a_ : Union[str, Any] = DebertaVaTokenizerFast(_lowercase , keep_accents=_lowercase )
a_ : int = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
a_ : Tuple = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
a_ : Optional[int] = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
a_ : Optional[Any] = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
a_ : Optional[Any] = rust_tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
a_ : Any = rust_tokenizer.convert_ids_to_tokens(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
# fmt: off
a_ : List[Any] = """I was born in 92000, and this is falsé."""
a_ : int = [13, 1, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9]
a_ : Tuple = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ]
a_ : str = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ]
# fmt: on
a_ : Any = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
a_ : Union[str, Any] = tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
a_ : Tuple = tokenizer.convert_ids_to_tokens(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
a_ : Tuple = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
a_ : int = rust_tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
a_ : Any = rust_tokenizer.convert_ids_to_tokens(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
def UpperCamelCase__ ( self ) -> Any:
a_ : Any = DebertaVaTokenizer(_lowercase )
a_ : Optional[Any] = tokenizer.encode("""sequence builders""" )
a_ : int = tokenizer.encode("""multi-sequence build""" )
a_ : Dict = tokenizer.build_inputs_with_special_tokens(_lowercase )
a_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_lowercase , _lowercase )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _lowercase )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _lowercase , )
@slow
def UpperCamelCase__ ( self ) -> int:
# fmt: off
a_ : Union[str, Any] = {"""input_ids""": [[1, 39_867, 36, 19_390, 486, 27, 35_052, 81_436, 18, 60_685, 1_225, 7, 35_052, 81_436, 18, 9_367, 16_899, 18, 15_937, 53, 594, 773, 18, 16_287, 30_465, 36, 15_937, 6, 41_139, 38, 36_979, 60_763, 191, 6, 34_132, 99, 6, 50_538, 390, 43_230, 6, 34_132, 2_779, 20_850, 14, 699, 1_072, 1_194, 36, 382, 10_901, 53, 7, 699, 1_072, 2_084, 36, 20_422, 630, 53, 19, 105, 3_049, 1_896, 1_053, 16_899, 1_506, 11, 37_978, 4_243, 7, 1_237, 31_869, 200, 16_566, 654, 6, 35_052, 81_436, 7, 55_630, 13_593, 4, 2], [1, 26, 15_011, 13, 667, 8, 1_053, 18, 23_611, 1_237, 72_356, 12_820, 34, 104_134, 1_209, 35, 13_313, 6_627, 21, 202, 347, 7, 164, 2_399, 11, 46, 4_485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1_232, 2_864, 15_785, 14_951, 105, 5, 8_581, 1_250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_lowercase , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
| 540 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class A__(a_, a_, unittest.TestCase ):
"""simple docstring"""
_A : Optional[Any] = StableDiffusionSAGPipeline
_A : Any = TEXT_TO_IMAGE_PARAMS
_A : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
_A : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
_A : int = TEXT_TO_IMAGE_IMAGE_PARAMS
_A : Optional[int] = False
def UpperCamelCase__ ( self ) -> List[Any]:
torch.manual_seed(0 )
a_ : Tuple = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
a_ : int = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=_lowercase , set_alpha_to_one=_lowercase , )
torch.manual_seed(0 )
a_ : Tuple = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
a_ : str = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
a_ : Union[str, Any] = CLIPTextModel(_lowercase )
a_ : int = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
a_ : Union[str, Any] = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""safety_checker""": None,
"""feature_extractor""": None,
}
return components
def UpperCamelCase__ ( self , _lowercase , _lowercase=0 ) -> Dict:
if str(_lowercase ).startswith("""mps""" ):
a_ : Optional[int] = torch.manual_seed(_lowercase )
else:
a_ : str = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
a_ : Tuple = {
"""prompt""": """.""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 1.0,
"""sag_scale""": 1.0,
"""output_type""": """numpy""",
}
return inputs
def UpperCamelCase__ ( self ) -> Optional[int]:
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class A__(unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase__ ( self ) -> Optional[int]:
a_ : Tuple = StableDiffusionSAGPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
a_ : List[str] = sag_pipe.to(_lowercase )
sag_pipe.set_progress_bar_config(disable=_lowercase )
a_ : Optional[int] = """."""
a_ : int = torch.manual_seed(0 )
a_ : Any = sag_pipe(
[prompt] , generator=_lowercase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
a_ : Any = output.images
a_ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
a_ : str = np.array([0.1_5_6_8, 0.1_7_3_8, 0.1_6_9_5, 0.1_6_9_3, 0.1_5_0_7, 0.1_7_0_5, 0.1_5_4_7, 0.1_7_5_1, 0.1_9_4_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def UpperCamelCase__ ( self ) -> List[Any]:
a_ : Optional[int] = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
a_ : List[str] = sag_pipe.to(_lowercase )
sag_pipe.set_progress_bar_config(disable=_lowercase )
a_ : int = """."""
a_ : Dict = torch.manual_seed(0 )
a_ : Union[str, Any] = sag_pipe(
[prompt] , generator=_lowercase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" )
a_ : Optional[Any] = output.images
a_ : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
a_ : Dict = np.array([0.3_4_5_9, 0.2_8_7_6, 0.2_5_3_7, 0.3_0_0_2, 0.2_6_7_1, 0.2_1_6_0, 0.3_0_2_6, 0.2_2_6_2, 0.2_3_7_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def UpperCamelCase__ ( self ) -> Any:
a_ : int = StableDiffusionSAGPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
a_ : Optional[Any] = sag_pipe.to(_lowercase )
sag_pipe.set_progress_bar_config(disable=_lowercase )
a_ : List[Any] = """."""
a_ : str = torch.manual_seed(0 )
a_ : int = sag_pipe(
[prompt] , width=768 , height=512 , generator=_lowercase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="""np""" , )
a_ : Any = output.images
assert image.shape == (1, 512, 768, 3)
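# Hypothetical usage sketch outside the test suite (checkpoint name taken from the
# tests above): self-attention guidance is controlled by `sag_scale`, applied on
# top of the usual classifier-free guidance.
#
#     pipe = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")
#     image = pipe("a photo of an astronaut", guidance_scale=7.5, sag_scale=0.75).images[0]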
| 540 | 1 |
"""Burrows-Wheeler transform (BWT) and its inverse."""
from __future__ import annotations

from typing import TypedDict


class BWTTransformDict(TypedDict):
    bwt_string: str
    idx_original_string: int


def all_rotations(s: str) -> list[str]:
    """Return all rotations of the string s."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")

    return [s[i:] + s[:i] for i in range(len(s))]


def bwt_transform(s: str) -> BWTTransformDict:
    """Return the BWT of s and the index of the original string among the sorted rotations."""
    if not isinstance(s, str):
        raise TypeError("The parameter s type must be str.")
    if not s:
        raise ValueError("The parameter s must not be empty.")

    rotations = all_rotations(s)
    rotations.sort()  # sort the list of rotations in alphabetical order
    # make a string composed of the last char of each rotation
    response: BWTTransformDict = {
        "bwt_string": "".join([word[-1] for word in rotations]),
        "idx_original_string": rotations.index(s),
    }
    return response


def reverse_bwt(bwt_string: str, idx_original_string: int) -> str:
    """Reverse the Burrows-Wheeler transform and return the original string."""
    if not isinstance(bwt_string, str):
        raise TypeError("The parameter bwt_string type must be str.")
    if not bwt_string:
        raise ValueError("The parameter bwt_string must not be empty.")
    try:
        idx_original_string = int(idx_original_string)
    except ValueError:
        raise TypeError(
            "The parameter idx_original_string type must be int or castable to int."
        )
    if idx_original_string < 0:
        raise ValueError("The parameter idx_original_string must not be lower than 0.")
    if idx_original_string >= len(bwt_string):
        raise ValueError("The parameter idx_original_string must be lower than len(bwt_string).")

    ordered_rotations = [""] * len(bwt_string)
    for _ in range(len(bwt_string)):
        for i in range(len(bwt_string)):
            ordered_rotations[i] = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]
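
# Worked example (values computed by hand from the functions above): for s = "banana"
# the sorted rotations are ["abanan", "anaban", "ananab", "banana", "nabana", "nanaba"],
# so bwt_transform("banana") == {"bwt_string": "nnbaaa", "idx_original_string": 3}
# and reverse_bwt("nnbaaa", 3) == "banana".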
if __name__ == "__main__":
snake_case_ : Optional[int] = 'Provide a string that I will generate its BWT transform: '
snake_case_ : List[str] = input(entry_msg).strip()
snake_case_ : Union[str, Any] = bwt_transform(s)
print(
F'''Burrows Wheeler transform for string \'{s}\' results '''
F'''in \'{result["bwt_string"]}\''''
)
snake_case_ : str = reverse_bwt(result['bwt_string'], result['idx_original_string'])
print(
F'''Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '''
F'''we get original string \'{original_string}\''''
)
| 719 |
import unittest

from transformers import is_torch_available
from transformers.testing_utils import require_torch


if is_torch_available():
    import torch

    from transformers.generation import DisjunctiveConstraint


@require_torch
class ConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # dc.token_ids is a list of integers and must be initialized from integers only.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # One constraint must not be a complete subset of another.
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
| 68 | 0 |
import unittest

from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device


if is_torch_available():
    import torch

    from transformers import AutoModelForImageClassification

if is_vision_available():
    from transformers import AutoImageProcessor


@require_torch
@require_vision
class DiTIntegrationTest(unittest.TestCase):
    @slow
    def test_for_image_classification(self):
        image_processor = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip")
        model.to(torch_device)

        from datasets import load_dataset

        dataset = load_dataset("nielsr/rvlcdip-demo")

        image = dataset["train"][0]["image"].convert("RGB")

        inputs = image_processor(image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            logits = outputs.logits

        expected_shape = torch.Size((1, 16))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [-0.4158, -0.4092, -0.4347],
            device=torch_device,
            dtype=torch.float,
        )
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
| 578 |
"""Fine-tune a Transformers model for text classification with TensorFlow (TFTrainer)."""

import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional

import datasets
import numpy as np
import tensorflow as tf

from transformers import (
    AutoConfig,
    AutoTokenizer,
    EvalPrediction,
    HfArgumentParser,
    PreTrainedTokenizer,
    TFAutoModelForSequenceClassification,
    TFTrainer,
    TFTrainingArguments,
)
from transformers.utils import logging as hf_logging


hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()


def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}

    # NOTE: the dict keys on the left-hand side below were lost in this copy and are
    # restored from the upstream run_tf_text_classification.py example.
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id


logger = logging.getLogger(__name__)


@dataclass
class DataTrainingArguments:
    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")

        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")

            results.update(result)

    return results
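
# Example invocation (hypothetical file names; the flags come from the dataclasses
# and TFTrainingArguments above):
#   python run_tf_text_classification.py \
#     --model_name_or_path bert-base-cased \
#     --train_file train.csv --dev_file dev.csv \
#     --label_column_id 0 --output_dir ./out --do_train --do_eval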
if __name__ == "__main__":
main() | 578 | 1 |
from string import ascii_uppercase

ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive integer to its representation in any base from 2 to 36."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")

    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            # digits above 9 map to letters A-Z via ALPHABET_VALUES
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(num)
            return str(new_value[::-1])

    return new_value[::-1]
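
# Worked example: decimal_to_any(255, 16) == "FF" and decimal_to_any(255, 2) == "11111111".
# The assertion loop in __main__ below round-trips every value through int(..., base).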
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
) | 69 |
import torch

from diffusers import DDPMParallelScheduler

from .test_schedulers import SchedulerCommonTest


class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
| 69 | 1 |
"""simple docstring"""
def lowerCamelCase_ ( UpperCAmelCase_ = 10_00 ) ->int:
"""simple docstring"""
return sum(2 * a * ((a - 1) // 2) for a in range(3 , n + 1 ) )
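
# The summand 2 * a * ((a - 1) // 2) equals a * (a - 1) for odd a and a * (a - 2)
# for even a; this matches the closed form for the maximum remainder of
# (a - 1)^n + (a + 1)^n modulo a^2 used in Project Euler problem 120, the likely
# source of this snippet.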
if __name__ == "__main__":
print(solution()) | 522 |
def validate_initial_digits(credit_card_number: str) -> bool:
    """Return True if the credit card number starts with a valid prefix."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))


def luhn_validation(credit_card_number: str) -> bool:
    """Return True if the credit card number passes the Luhn checksum."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two digit number
        # i.e greater than 9 (e.g., 6 x 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6),
        # to get a single digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print a diagnostic message and return whether the number is valid."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False

    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False

    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False

    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False

    print(f"{credit_card_number} is a valid credit card number.")
    return True
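
# Worked Luhn example: for "4111111111111111", doubling every second digit from the
# right turns the leading 4 into 8 and seven of the 1s into 2s, so the digit sum is
# 8 + 14 + 8 = 30, a multiple of 10, and the number passes the check.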
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number("""4111111111111111""")
validate_credit_card_number("""32323""")
| 314 | 0 |
import math


def real_power(apparent_power: float, power_factor: float) -> float:
    """Calculate real power from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * power_factor


def reactive_power(apparent_power: float, power_factor: float) -> float:
    """Calculate reactive power from apparent power and power factor."""
    if (
        not isinstance(power_factor, (int, float))
        or power_factor < -1
        or power_factor > 1
    ):
        raise ValueError("power_factor must be a valid float value between -1 and 1.")
    return apparent_power * math.sqrt(1 - power_factor**2)
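
# Worked example: for apparent_power = 100 VA and power_factor = 0.9,
# real_power(100, 0.9) == 90.0 and reactive_power(100, 0.9) == 100 * sqrt(1 - 0.81),
# which is approximately 43.59 VAR.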
if __name__ == "__main__":
import doctest
doctest.testmod()
| 705 |
from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Calculate beta = velocity / c, the speed as a fraction of the speed of light."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")

    return velocity / c


def gamma(velocity: float) -> float:
    """Calculate the Lorentz factor for the given velocity."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Build the Lorentz boost matrix along the x axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    """Apply the Lorentz transformation to a four-vector event (symbolic if omitted)."""
    # Ensure event is not empty
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event
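
# Quick sanity check: at half the speed of light, beta = 0.5 and
# gamma = 1 / sqrt(1 - 0.25) ~= 1.1547, so transformation_matrix(c / 2) has
# ~1.1547 on the diagonal of its boost block and ~-0.5774 off-diagonal.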
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
lowerCamelCase : Dict = transform(2_9_9_7_9_2_4_5)
print('Example of four vector: ')
print(F"""ct' = {four_vector[0]}""")
print(F"""x' = {four_vector[1]}""")
print(F"""y' = {four_vector[2]}""")
print(F"""z' = {four_vector[3]}""")
# Substitute symbols with numerical values
lowerCamelCase : List[Any] = {ct: c, x: 1, y: 1, z: 1}
lowerCamelCase : Dict = [four_vector[i].subs(sub_dict) for i in range(4)]
print(F"""\n{numerical_vector}""")
| 303 | 0 |
import argparse

from t5x import checkpoints

from transformers import AutoConfig, FlaxAutoModelForSeq2SeqLM


def convert_t5x_checkpoint_to_flax(t5x_checkpoint_path, config_name, flax_dump_folder_path):
    config = AutoConfig.from_pretrained(config_name)
    flax_model = FlaxAutoModelForSeq2SeqLM.from_config(config=config)
    t5x_model = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)

    split_mlp_wi = "wi_0" in t5x_model["target"]["encoder"]["layers_0"]["mlp"]

    # Pick the encoder self-attention module name. Chained with elif so that plain
    # T5 configs do not fall through to the error below.
    if config.model_type == "t5":
        encoder_attn_name = "SelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "local":
        encoder_attn_name = "LocalSelfAttention"
    elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        encoder_attn_name = "TransientGlobalSelfAttention"
    else:
        raise ValueError(
            "Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"
            " attribute with a value from ['local', 'transient-global]."
        )

    # NOTE: the Flax-side assignment targets below were lost in this copy; the exact
    # parameter paths are reconstructed from the upstream conversion script and should
    # be treated as a best-effort reconstruction.

    # Encoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["encoder"][layer_name]["attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["encoder"][layer_name]["attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["encoder"][layer_name]["attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["encoder"][layer_name]["attention"]["value"]["kernel"]

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            t5x_global_layer_norm = t5x_model["target"]["encoder"][layer_name]["attention"]["T5LayerNorm_0"]["scale"]

        # Layer Normalization
        t5x_attention_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_attention_layer_norm"]["scale"]

        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["encoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["encoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["encoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_encoder_layer_block = flax_model.params["encoder"]["block"][str(layer_index)]["layer"]
        flax_model_encoder_layer_block["0"][encoder_attn_name]["k"]["kernel"] = t5x_attention_key
        flax_model_encoder_layer_block["0"][encoder_attn_name]["o"]["kernel"] = t5x_attention_out
        flax_model_encoder_layer_block["0"][encoder_attn_name]["q"]["kernel"] = t5x_attention_query
        flax_model_encoder_layer_block["0"][encoder_attn_name]["v"]["kernel"] = t5x_attention_value

        flax_model_encoder_layer_block["0"]["layer_norm"]["weight"] = t5x_attention_layer_norm

        # Global input layer norm
        if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
            flax_model_encoder_layer_block["0"][encoder_attn_name]["global_input_layer_norm"][
                "weight"
            ] = t5x_global_layer_norm

        if split_mlp_wi:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_encoder_layer_block["1"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_encoder_layer_block["1"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_encoder_layer_block["1"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["encoder"]["block"][str(layer_index)]["layer"] = flax_model_encoder_layer_block

    # Only for layer 0:
    t5x_encoder_rel_embedding = t5x_model["target"]["encoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["relative_attention_bias"][
        "embedding"
    ] = t5x_encoder_rel_embedding

    # Side/global relative position_bias + layer norm
    if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
        t5x_encoder_global_rel_embedding = t5x_model["target"]["encoder"]["side_relpos_bias"]["rel_embedding"].T
        flax_model.params["encoder"]["block"]["0"]["layer"]["0"][encoder_attn_name]["global_relative_attention_bias"][
            "embedding"
        ] = t5x_encoder_global_rel_embedding

    # Assigning
    t5x_encoder_norm = t5x_model["target"]["encoder"]["encoder_norm"]["scale"]
    flax_model.params["encoder"]["final_layer_norm"]["weight"] = t5x_encoder_norm

    # Decoder
    for layer_index in range(config.num_layers):
        layer_name = f"layers_{str(layer_index)}"

        # Self-Attention
        t5x_attention_key = t5x_model["target"]["decoder"][layer_name]["self_attention"]["key"]["kernel"]
        t5x_attention_out = t5x_model["target"]["decoder"][layer_name]["self_attention"]["out"]["kernel"]
        t5x_attention_query = t5x_model["target"]["decoder"][layer_name]["self_attention"]["query"]["kernel"]
        t5x_attention_value = t5x_model["target"]["decoder"][layer_name]["self_attention"]["value"]["kernel"]

        # Layer Normalization
        t5x_pre_attention_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_self_attention_layer_norm"][
            "scale"
        ]

        # Encoder-Decoder-Attention
        t5x_enc_dec_attention_module = t5x_model["target"]["decoder"][layer_name]["encoder_decoder_attention"]
        t5x_enc_dec_attention_key = t5x_enc_dec_attention_module["key"]["kernel"]
        t5x_enc_dec_attention_out = t5x_enc_dec_attention_module["out"]["kernel"]
        t5x_enc_dec_attention_query = t5x_enc_dec_attention_module["query"]["kernel"]
        t5x_enc_dec_attention_value = t5x_enc_dec_attention_module["value"]["kernel"]

        # Layer Normalization
        t5x_cross_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_cross_attention_layer_norm"]["scale"]

        # MLP
        if split_mlp_wi:
            t5x_mlp_wi_0 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_0"]["kernel"]
            t5x_mlp_wi_1 = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi_1"]["kernel"]
        else:
            t5x_mlp_wi = t5x_model["target"]["decoder"][layer_name]["mlp"]["wi"]["kernel"]

        t5x_mlp_wo = t5x_model["target"]["decoder"][layer_name]["mlp"]["wo"]["kernel"]

        # Layer Normalization
        t5x_mlp_layer_norm = t5x_model["target"]["decoder"][layer_name]["pre_mlp_layer_norm"]["scale"]

        # Assigning
        flax_model_decoder_layer_block = flax_model.params["decoder"]["block"][str(layer_index)]["layer"]
        flax_model_decoder_layer_block["0"]["SelfAttention"]["k"]["kernel"] = t5x_attention_key
        flax_model_decoder_layer_block["0"]["SelfAttention"]["o"]["kernel"] = t5x_attention_out
        flax_model_decoder_layer_block["0"]["SelfAttention"]["q"]["kernel"] = t5x_attention_query
        flax_model_decoder_layer_block["0"]["SelfAttention"]["v"]["kernel"] = t5x_attention_value

        flax_model_decoder_layer_block["0"]["layer_norm"]["weight"] = t5x_pre_attention_layer_norm

        flax_model_decoder_layer_block["1"]["EncDecAttention"]["k"]["kernel"] = t5x_enc_dec_attention_key
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["o"]["kernel"] = t5x_enc_dec_attention_out
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["q"]["kernel"] = t5x_enc_dec_attention_query
        flax_model_decoder_layer_block["1"]["EncDecAttention"]["v"]["kernel"] = t5x_enc_dec_attention_value

        flax_model_decoder_layer_block["1"]["layer_norm"]["weight"] = t5x_cross_layer_norm

        if split_mlp_wi:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_0"]["kernel"] = t5x_mlp_wi_0
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi_1"]["kernel"] = t5x_mlp_wi_1
        else:
            flax_model_decoder_layer_block["2"]["DenseReluDense"]["wi"]["kernel"] = t5x_mlp_wi

        flax_model_decoder_layer_block["2"]["DenseReluDense"]["wo"]["kernel"] = t5x_mlp_wo
        flax_model_decoder_layer_block["2"]["layer_norm"]["weight"] = t5x_mlp_layer_norm

        flax_model.params["decoder"]["block"][str(layer_index)]["layer"] = flax_model_decoder_layer_block

    # Decoder Normalization
    t5x_decoder_norm = t5x_model["target"]["decoder"]["decoder_norm"]["scale"]
    flax_model.params["decoder"]["final_layer_norm"]["weight"] = t5x_decoder_norm

    # Only for layer 0:
    t5x_decoder_rel_embedding = t5x_model["target"]["decoder"]["relpos_bias"]["rel_embedding"].T
    flax_model.params["decoder"]["block"]["0"]["layer"]["0"]["SelfAttention"]["relative_attention_bias"][
        "embedding"
    ] = t5x_decoder_rel_embedding

    # Token Embeddings
    t5x_token_embeddings = t5x_model["target"]["token_embedder"]["embedding"]
    flax_model.params["shared"]["embedding"] = t5x_token_embeddings

    # LM Head (only in v1.1 and LongT5 checkpoints)
    if "logits_dense" in t5x_model["target"]["decoder"]:
        flax_model.params["lm_head"]["kernel"] = t5x_model["target"]["decoder"]["logits_dense"]["kernel"]

    flax_model.save_pretrained(flax_dump_folder_path)
    print("T5X Model was successfully converted!")
if __name__ == "__main__":
_lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
_lowerCamelCase = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path) | 6 |
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    is_valid_image,
    to_numpy_array,
    valid_images,
)
from ...utils import logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    """Coerce a single image, a list of frames, or a batch of videos into a list of videos."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")


class VivitImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        offset: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.offset = offset
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        offset: bool = True,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        if offset and not do_rescale:
            raise ValueError("For offset, do_rescale must also be set to True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)

        if do_center_crop:
            image = self.center_crop(image, size=crop_size)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, offset=offset)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)

        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        offset: bool = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        offset = offset if offset is not None else self.offset
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    offset=offset,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
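
# Minimal usage sketch (assumes a video given as a list of PIL images or numpy frames):
#   processor = VivitImageProcessor()
#   batch = processor(video, return_tensors="np")
#   batch["pixel_values"]  # (num_videos, num_frames, channels, height, width)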
| 71 | 0 |
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}

test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Use depth first search to order vertices of the original graph by finish time."""

    visited[vert] = True
    order = []

    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)

    order.append(vert)

    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Use depth first search on the reversed graph to collect one strongly connected component."""

    visited[vert] = True
    component = [vert]

    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)

    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Return the list of strongly connected components of graph (Kosaraju's algorithm)."""

    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
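
# Worked example using the helpers above: in test_graph_1 the cycle 0 -> 2 -> 1 -> 0
# forms one component, while 3 and 4 are their own components, so
# strongly_connected_components(test_graph_1) == [[0, 1, 2], [3], [4]].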
| 541 |
import argparse
import logging
import os
from datetime import datetime

import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm

from transformers import GPT2LMHeadModel


logger = logging.getLogger(__name__)


def save_model(model, dirpath):
    # Remove any stale checkpoint files before saving.
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)


def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)


def print_2d_tensor(tensor):
    """Print a 2D tensor, one row of tab-separated values per layer."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
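
# Worked example for entropy(): a uniform 2-way distribution has entropy ln(2):
#   entropy(torch.tensor([0.5, 0.5]))  # tensor(0.6931...)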
def compute_heads_importance(
    args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False
):
    """Compute head attention entropy and head importance scores
    (importance as in Michel et al., http://arxiv.org/abs/1905.10650)."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)

    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    # NOTE: the indexing target of the assignment below was lost in this copy and is
    # reconstructed from the upstream bertology pruning script.
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def lowerCamelCase__ ( UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ) -> List[str]:
'''simple docstring'''
_snake_case , _snake_case , _snake_case = compute_heads_importance(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ )
_snake_case = 1 / loss # instead of downsteam score use the LM loss
logger.info('Pruning: original score: %f, threshold: %f' , UpperCamelCase__ , original_score * args.masking_threshold )
_snake_case = torch.ones_like(UpperCamelCase__ )
_snake_case = max(1 , int(new_head_mask.numel() * args.masking_amount ) )
_snake_case = original_score
while current_score >= original_score * args.masking_threshold:
_snake_case = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
_snake_case = float('Inf' )
_snake_case = head_importance.view(-1 ).sort()[1]
if len(UpperCamelCase__ ) <= num_to_mask:
print('BREAK BY num_to_mask' )
break
# mask heads
_snake_case = current_heads_to_mask[:num_to_mask]
logger.info('Heads to mask: %s' , str(current_heads_to_mask.tolist() ) )
_snake_case = new_head_mask.view(-1 )
_snake_case = 0.0
_snake_case = new_head_mask.view_as(UpperCamelCase__ )
_snake_case = new_head_mask.clone().detach()
print_ad_tensor(UpperCamelCase__ )
# Compute metric and head importance again
_snake_case , _snake_case , _snake_case = compute_heads_importance(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ , head_mask=UpperCamelCase__ )
_snake_case = 1 / loss
logger.info(
'Masking: current score: %f, remaining heads %d (%.1f percents)' , UpperCamelCase__ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info('Final head mask' )
print_ad_tensor(UpperCamelCase__ )
np.save(os.path.join(args.output_dir , 'head_mask.npy' ) , head_mask.detach().cpu().numpy() )
return head_mask
def lowerCamelCase__ ( UpperCamelCase__ : Any , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : int ) -> List[Any]:
'''simple docstring'''
_snake_case = datetime.now()
_snake_case , _snake_case , _snake_case = compute_heads_importance(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ , compute_importance=UpperCamelCase__ , head_mask=UpperCamelCase__ )
_snake_case = 1 / loss
_snake_case = datetime.now() - before_time
_snake_case = sum(p.numel() for p in model.parameters() )
_snake_case = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(UpperCamelCase__ ) )
}
for k, v in heads_to_prune.items():
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
_snake_case = [
v,
]
assert sum(len(UpperCamelCase__ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item()
model.prune_heads(UpperCamelCase__ )
_snake_case = sum(p.numel() for p in model.parameters() )
_snake_case = datetime.now()
_snake_case , _snake_case , _snake_case = compute_heads_importance(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ , compute_importance=UpperCamelCase__ , head_mask=UpperCamelCase__ , actually_pruned=UpperCamelCase__ , )
_snake_case = 1 / loss
_snake_case = datetime.now() - before_time
logger.info(
'Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)' , UpperCamelCase__ , UpperCamelCase__ , pruned_num_params / original_num_params * 100 , )
logger.info('Pruning: score with masking: %f score with pruning: %f' , UpperCamelCase__ , UpperCamelCase__ )
logger.info('Pruning: speed ratio (original timing / new timing): %f percents' , original_time / new_time * 100 )
save_model(UpperCamelCase__ , args.output_dir )
def lowerCamelCase__ ( ) -> str:
'''simple docstring'''
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--data_dir' , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help='The input data dir. Should contain the .tsv files (or other data files) for the task.' , )
parser.add_argument(
'--model_name_or_path' , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--output_dir' , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help='The output directory where the model predictions and checkpoints will be written.' , )
# Other parameters
parser.add_argument(
'--config_name' , default='' , type=UpperCamelCase__ , help='Pretrained config name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--tokenizer_name' , default='' , type=UpperCamelCase__ , help='Pretrained tokenizer name or path if not the same as model_name_or_path' , )
parser.add_argument(
'--cache_dir' , default=UpperCamelCase__ , type=UpperCamelCase__ , help='Where do you want to store the pre-trained models downloaded from s3' , )
parser.add_argument(
'--data_subset' , type=UpperCamelCase__ , default=-1 , help='If > 0: limit the data to a subset of data_subset instances.' )
parser.add_argument(
'--overwrite_output_dir' , action='store_true' , help='Whether to overwrite data in output directory' )
parser.add_argument(
'--overwrite_cache' , action='store_true' , help='Overwrite the cached training and evaluation sets' )
parser.add_argument(
'--dont_normalize_importance_by_layer' , action='store_true' , help='Don\'t normalize importance score by layers' )
parser.add_argument(
'--dont_normalize_global_importance' , action='store_true' , help='Don\'t normalize all importance scores between 0 and 1' , )
parser.add_argument(
'--try_masking' , action='store_true' , help='Whether to try to mask head until a threshold of accuracy.' )
parser.add_argument(
        '--masking_threshold' , default=0.9 , type=float , help='masking threshold in terms of metrics (stop masking when metric < threshold * original metric value).' , )
    parser.add_argument(
        '--masking_amount' , default=0.1 , type=float , help='Fraction of heads to mask at each masking step.' )
parser.add_argument('--metric_name' , default='acc' , type=UpperCamelCase__ , help='Metric to use for head masking.' )
parser.add_argument(
'--max_seq_length' , default=128 , type=UpperCamelCase__ , help=(
'The maximum total input sequence length after WordPiece tokenization. \n'
'Sequences longer than this will be truncated, sequences shorter padded.'
) , )
parser.add_argument('--batch_size' , default=1 , type=UpperCamelCase__ , help='Batch size.' )
parser.add_argument('--seed' , type=UpperCamelCase__ , default=42 )
parser.add_argument('--local_rank' , type=UpperCamelCase__ , default=-1 , help='local_rank for distributed training on gpus' )
parser.add_argument('--no_cuda' , action='store_true' , help='Whether not to use CUDA when available' )
parser.add_argument('--server_ip' , type=UpperCamelCase__ , default='' , help='Can be used for distant debugging.' )
parser.add_argument('--server_port' , type=UpperCamelCase__ , default='' , help='Can be used for distant debugging.' )
_snake_case = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print('Waiting for debugger attach' )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=UpperCamelCase__ )
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
_snake_case = torch.device('cuda' if torch.cuda.is_available() and not args.no_cuda else 'cpu' )
_snake_case = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank )
_snake_case = torch.device('cuda' , args.local_rank )
_snake_case = 1
torch.distributed.init_process_group(backend='nccl' ) # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN )
logger.info('device: {} n_gpu: {}, distributed: {}'.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) )
_snake_case = GPTaLMHeadModel.from_pretrained(args.model_name_or_path )
# Distributed and parallel training
model.to(args.device )
if args.local_rank != -1:
_snake_case = nn.parallel.DistributedDataParallel(
UpperCamelCase__ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=UpperCamelCase__ )
elif args.n_gpu > 1:
_snake_case = nn.DataParallel(UpperCamelCase__ )
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=UpperCamelCase__ )
torch.save(UpperCamelCase__ , os.path.join(args.output_dir , 'run_args.bin' ) )
logger.info('Training/evaluation parameters %s' , UpperCamelCase__ )
# Prepare dataset
_snake_case = np.concatenate(
[
            np.loadtxt(args.data_dir , dtype=np.int64 ),
] )
_snake_case = (torch.from_numpy(UpperCamelCase__ ),)
_snake_case = TensorDataset(*UpperCamelCase__ )
_snake_case = RandomSampler(UpperCamelCase__ )
_snake_case = DataLoader(UpperCamelCase__ , sampler=UpperCamelCase__ , batch_size=args.batch_size )
# Compute head entropy and importance score
compute_heads_importance(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
_snake_case = mask_heads(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
prune_heads(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
main()
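

# Illustrative sketch (defined here for reference, never called): the pruning
# step above relies on `PreTrainedModel.prune_heads`, which takes a
# {layer_index: [head_indices]} dict. The model name and head choices below
# are examples only.
def _prune_heads_example():
    from transformers import BertModel

    bert = BertModel.from_pretrained("bert-base-uncased")
    bert.prune_heads({0: [0, 2], 1: [1]})  # drop heads 0, 2 of layer 0 and head 1 of layer 1
    return sum(p.numel() for p in bert.parameters())  # smaller than before pruning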
| 541 | 1 |
from random import shuffle
import tensorflow as tf  # NOTE: uses TF1 graph APIs (tf.Session, tf.placeholder); under TF2, use tensorflow.compat.v1 with eager execution disabled
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
    """K-means clustering of `vectors` into `noofclusters` groups, built on TF1 graph ops."""
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        # tf.sub was removed in TF 1.0; tf.subtract is the surviving name
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances})
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment})

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)})
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location})

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
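

# Illustrative usage sketch: cluster two well-separated 2-D blobs. This
# assumes TF1-style execution as noted at the import above; the data below is
# synthetic and the blob centres are examples only.
if __name__ == "__main__":
    import numpy as np

    points = np.vstack(
        [
            np.random.randn(20, 2) + [5.0, 5.0],  # blob around (5, 5)
            np.random.randn(20, 2) + [-5.0, -5.0],  # blob around (-5, -5)
        ]
    ).astype(np.float64)
    centroids, assignments = TFKMeansCluster(points, 2)
    print(centroids)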
| 491 |
'''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger()
@dataclass
class Tracker:
    module: nn.Module
    traced: List[nn.Module] = field(default_factory=list)
    handles: list = field(default_factory=list)

    def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
        # Only record leaf modules (no submodules), plus convolutions and
        # batch norms, which carry the learnable parameters we want to copy.
        has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, nn.Conv2d) or isinstance(m, nn.BatchNorm2d)
        if has_not_submodules:
            self.traced.append(m)

    def __call__(self, x: Tensor):
        for m in self.module.modules():
            self.handles.append(m.register_forward_hook(self._forward_hook))
        self.module(x)
        [x.remove() for x in self.handles]
        return self

    @property
    def parametrized(self):
        # check the len of the state_dict keys to see if we have learnable params
        return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))


@dataclass
class ModuleTransfer:
    src: nn.Module
    dest: nn.Module
    verbose: int = 0
    src_skip: List = field(default_factory=list)
    dest_skip: List = field(default_factory=list)

    def __call__(self, x: Tensor):
        dest_traced = Tracker(self.dest)(x).parametrized
        src_traced = Tracker(self.src)(x).parametrized
        src_traced = list(filter(lambda m: type(m) not in self.src_skip, src_traced))
        dest_traced = list(filter(lambda m: type(m) not in self.dest_skip, dest_traced))
        if len(dest_traced) != len(src_traced):
            raise Exception(
                f'''Numbers of operations are different. Source module has {len(src_traced)} operations while'''
                f''' destination module has {len(dest_traced)}.''' )
        for dest_m, src_m in zip(dest_traced, src_traced):
            dest_m.load_state_dict(src_m.state_dict())
            if self.verbose == 1:
                print(f'''Transfered from={src_m} to={dest_m}''' )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : ResNetConfig , UpperCAmelCase__ : Path , UpperCAmelCase__ : bool = True ) -> Union[str, Any]:
'''simple docstring'''
print(F'''Converting {name}...''' )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ :str = timm.create_model(UpperCAmelCase__ , pretrained=UpperCAmelCase__ ).eval()
SCREAMING_SNAKE_CASE__ :List[str] = ResNetForImageClassification(UpperCAmelCase__ ).eval()
SCREAMING_SNAKE_CASE__ :Dict = ModuleTransfer(src=UpperCAmelCase__ , dest=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ :List[Any] = torch.randn((1, 3, 2_2_4, 2_2_4) )
module_transfer(UpperCAmelCase__ )
assert torch.allclose(from_model(UpperCAmelCase__ ) , our_model(UpperCAmelCase__ ).logits ), "The model logits don't match the original one."
SCREAMING_SNAKE_CASE__ :List[str] = F'''resnet{"-".join(name.split("resnet" ) )}'''
print(UpperCAmelCase__ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=UpperCAmelCase__ , )
# we can use the convnext one
SCREAMING_SNAKE_CASE__ :int = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=UpperCAmelCase__ , )
print(F'''Pushed {checkpoint_name}''' )
def lowerCamelCase ( UpperCAmelCase__ : Path , UpperCAmelCase__ : str = None , UpperCAmelCase__ : bool = True ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = 'imagenet-1k-id2label.json'
SCREAMING_SNAKE_CASE__ :Any = 1_0_0_0
SCREAMING_SNAKE_CASE__ :List[Any] = (1, num_labels)
SCREAMING_SNAKE_CASE__ :List[str] = 'huggingface/label-files'
SCREAMING_SNAKE_CASE__ :Union[str, Any] = num_labels
SCREAMING_SNAKE_CASE__ :Tuple = json.load(open(hf_hub_download(UpperCAmelCase__ , UpperCAmelCase__ , repo_type='dataset' ) , 'r' ) )
SCREAMING_SNAKE_CASE__ :int = {int(UpperCAmelCase__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ :str = idalabel
SCREAMING_SNAKE_CASE__ :Any = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ :Optional[Any] = partial(UpperCAmelCase__ , num_labels=UpperCAmelCase__ , idalabel=UpperCAmelCase__ , labelaid=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ :List[str] = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
}
if model_name:
convert_weight_and_push(UpperCAmelCase__ , names_to_config[model_name] , UpperCAmelCase__ , UpperCAmelCase__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return config, expected_shape
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
UpperCamelCase_ = parser.parse_args()
UpperCamelCase_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
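

# Illustrative standalone sketch of the forward-hook tracing idea used by
# `Tracker` above: record every leaf module executed during a forward pass.
# The toy model in the commented call is an example only.
def _trace_leaf_modules(model: nn.Module, x: Tensor) -> list:
    traced, handles = [], []
    for m in model.modules():
        if len(list(m.children())) == 0:  # leaf module: no submodules
            handles.append(m.register_forward_hook(lambda mod, inp, out: traced.append(mod)))
    model(x)  # run one forward pass so every executed leaf fires its hook
    for h in handles:
        h.remove()
    return traced


# Example: prints [Linear(in_features=4, out_features=8, bias=True), ReLU()]
# print(_trace_leaf_modules(nn.Sequential(nn.Linear(4, 8), nn.ReLU()), torch.randn(1, 4)))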
| 209 | 0 |
'''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmv2,
    layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
    mobilenet_v1,
    mobilenet_v2,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 709 |
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json',
# See all GPT-J models at https://huggingface.co/models?filter=gpt_j
}
class GPTJConfig(PretrainedConfig):
    model_type = "gptj"
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__( self , vocab_size=5_0400 , n_positions=2048 , n_embd=4096 , n_layer=28 , n_head=16 , rotary_dim=64 , n_inner=None , activation_function="gelu_new" , resid_pdrop=0.0 , embd_pdrop=0.0 , attn_pdrop=0.0 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , use_cache=True , bos_token_id=5_0256 , eos_token_id=5_0256 , tie_word_embeddings=False , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            bos_token_id=bos_token_id , eos_token_id=eos_token_id , tie_word_embeddings=tie_word_embeddings , **kwargs )


class GPTJOnnxConfig(OnnxConfigWithPast):
    def __init__( self , config: PretrainedConfig , task: str = "default" , patching_specs: List[PatchingSpec] = None , use_past: bool = False , ):
        super().__init__(config , task=task , patching_specs=patching_specs , use_past=use_past )
        if not getattr(self._config , "pad_token_id" , None ):
            # TODO: how to do that better?
            self._config.pad_token_id = 0

    @property
    def inputs( self ) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}} )
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="inputs" )
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}
        return common_inputs

    @property
    def num_layers( self ) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads( self ) -> int:
        return self._config.n_head

    def generate_dummy_inputs( self , tokenizer: PreTrainedTokenizer , batch_size: int = -1 , seq_length: int = -1 , is_pair: bool = False , framework: Optional[TensorType] = None , ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast , self ).generate_dummy_inputs(
            tokenizer , batch_size=batch_size , seq_length=seq_length , is_pair=is_pair , framework=framework )
        # We need to order the inputs in the way they appear in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]} )
        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape ), torch.zeros(past_shape )) for _ in range(self.num_layers )
                ]
        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch , past_key_values_length , dtype=mask_dtype )] , dim=1 )
        return ordered_inputs

    @property
    def default_onnx_opset( self ) -> int:
        return 13
| 193 | 0 |
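

# Usage sketch for the GPT-J config class defined above (illustrative, never
# called): a tiny randomly initialised model built from a small config;
# GPTJForCausalLM is the matching model class in transformers.
def _tiny_gptj_example():
    from transformers import GPTJConfig, GPTJForCausalLM

    config = GPTJConfig(n_layer=4, n_head=4, n_embd=256, rotary_dim=32)
    return GPTJForCausalLM(config).num_parameters()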
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
_lowerCamelCase : str = logging.get_logger(__name__)
class lowerCAmelCase__ ( __magic_name__ ):
'''simple docstring'''
lowercase_ = ["input_values", "padding_mask"]
def __init__( self , lowercase__ = 1 , lowercase__ = 2_4_0_0_0 , lowercase__ = 0.0 , lowercase__ = None , lowercase__ = None , **lowercase__ , ):
'''simple docstring'''
super().__init__(feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , **UpperCamelCase_ )
__A =chunk_length_s
__A =overlap
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __UpperCamelCase ( self ):
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self , lowercase__ , lowercase__ = None , lowercase__ = False , lowercase__ = None , lowercase__ = None , lowercase__ = None , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of'''
f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'''
f''' {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
if padding and truncation:
raise ValueError('''Both padding and truncation were set. Make sure you only set one.''' )
elif padding is None:
# by default let's pad the inputs
__A =True
__A =bool(
isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
        if is_batched:
            raw_audio = [np.asarray(audio , dtype=np.float32 ).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio , np.ndarray ):
            raw_audio = np.asarray(raw_audio , dtype=np.float32 )
        elif isinstance(raw_audio , np.ndarray ) and raw_audio.dtype is np.dtype(np.float64 ):
            raw_audio = raw_audio.astype(np.float32 )
# always return batch
if not is_batched:
__A =[np.asarray(UpperCamelCase_ ).T]
# verify inputs are valid
for idx, example in enumerate(UpperCamelCase_ ):
if example.ndim > 2:
raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' )
__A =None
__A =BatchFeature({'''input_values''': raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
__A =min(array.shape[0] for array in raw_audio )
__A =int(np.floor(max_length / self.chunk_stride ) )
__A =(nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
__A =max(array.shape[0] for array in raw_audio )
__A =int(np.ceil(max_length / self.chunk_stride ) )
__A =(nb_step - 1) * self.chunk_stride + self.chunk_length
__A ='''max_length'''
else:
__A =input_values
# normal padding on batch
if padded_inputs is None:
__A =self.pad(
UpperCamelCase_ , max_length=UpperCamelCase_ , truncation=UpperCamelCase_ , padding=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , )
if padding:
__A =padded_inputs.pop('''attention_mask''' )
__A =[]
for example in padded_inputs.pop('''input_values''' ):
if self.feature_size == 1:
__A =example[..., None]
input_values.append(example.T )
__A =input_values
if return_tensors is not None:
__A =padded_inputs.convert_to_tensors(UpperCamelCase_ )
return padded_inputs
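

# Usage sketch (illustrative, never called): upstream this class is
# `EncodecFeatureExtractor`. One second of random mono audio at 24 kHz yields
# `input_values` of shape (batch, channels, samples).
def _encodec_extractor_example():
    from transformers import EncodecFeatureExtractor

    extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000)
    audio = np.random.randn(24_000).astype(np.float32)
    inputs = extractor(raw_audio=audio, sampling_rate=24_000, return_tensors="pt")
    return inputs["input_values"].shape  # torch.Size([1, 1, 24000])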
| 184 |
"""simple docstring"""
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
A_ = logging.getLogger(__name__)
class __lowerCAmelCase ( UpperCAmelCase ):
'''simple docstring'''
def UpperCamelCase__ ( self: Optional[int] , UpperCamelCase_: Optional[int] , UpperCamelCase_: int , UpperCamelCase_: Dict=None , UpperCamelCase_: Dict=None ):
UpperCamelCase_ =self.layer[current_layer](UpperCamelCase_ , UpperCamelCase_ , head_mask[current_layer] )
UpperCamelCase_ =layer_outputs[0]
return hidden_states
@add_start_docstrings(
"The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , UpperCAmelCase , )
class __lowerCAmelCase ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self: int , UpperCamelCase_: Optional[Any] ):
super().__init__(UpperCamelCase_ )
UpperCamelCase_ =BertEncoderWithPabee(UpperCamelCase_ )
self.init_weights()
UpperCamelCase_ =0
UpperCamelCase_ =0
UpperCamelCase_ =0
UpperCamelCase_ =0
def UpperCamelCase__ ( self: Union[str, Any] , UpperCamelCase_: List[Any] ):
UpperCamelCase_ =threshold
def UpperCamelCase__ ( self: Any , UpperCamelCase_: Optional[Any] ):
UpperCamelCase_ =patience
def UpperCamelCase__ ( self: Tuple ):
UpperCamelCase_ =0
UpperCamelCase_ =0
def UpperCamelCase__ ( self: str ):
UpperCamelCase_ =self.inference_layers_num / self.inference_instances_num
UpperCamelCase_ =(
f"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="""
f""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"""
)
print(UpperCamelCase_ )
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
def UpperCamelCase__ ( self: Union[str, Any] , UpperCamelCase_: str=None , UpperCamelCase_: Dict=None , UpperCamelCase_: List[str]=None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: int=None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: Optional[int]=None , UpperCamelCase_: List[Any]=None , UpperCamelCase_: int=None , UpperCamelCase_: Optional[Any]=None , UpperCamelCase_: Dict=False , ):
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time" )
elif input_ids is not None:
UpperCamelCase_ =input_ids.size()
elif inputs_embeds is not None:
UpperCamelCase_ =inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds" )
UpperCamelCase_ =input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
UpperCamelCase_ =torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
if token_type_ids is None:
UpperCamelCase_ =torch.zeros(UpperCamelCase_ , dtype=torch.long , device=UpperCamelCase_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
UpperCamelCase_ =self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ )
        # If a 2D or 3D attention mask is provided for the cross-attention,
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if self.config.is_decoder and encoder_hidden_states is not None:
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ =encoder_hidden_states.size()
UpperCamelCase_ =(encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
UpperCamelCase_ =torch.ones(UpperCamelCase_ , device=UpperCamelCase_ )
UpperCamelCase_ =self.invert_attention_mask(UpperCamelCase_ )
else:
UpperCamelCase_ =None
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
UpperCamelCase_ =self.get_head_mask(UpperCamelCase_ , self.config.num_hidden_layers )
UpperCamelCase_ =self.embeddings(
input_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ )
UpperCamelCase_ =embedding_output
if self.training:
UpperCamelCase_ =[]
for i in range(self.config.num_hidden_layers ):
UpperCamelCase_ =self.encoder.adaptive_forward(
UpperCamelCase_ , current_layer=UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ )
UpperCamelCase_ =self.pooler(UpperCamelCase_ )
UpperCamelCase_ =output_layers[i](output_dropout(UpperCamelCase_ ) )
res.append(UpperCamelCase_ )
elif self.patience == 0: # Use all layers for inference
UpperCamelCase_ =self.encoder(
UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , )
UpperCamelCase_ =self.pooler(encoder_outputs[0] )
UpperCamelCase_ =[output_layers[self.config.num_hidden_layers - 1](UpperCamelCase_ )]
else:
UpperCamelCase_ =0
UpperCamelCase_ =None
UpperCamelCase_ =0
for i in range(self.config.num_hidden_layers ):
calculated_layer_num += 1
UpperCamelCase_ =self.encoder.adaptive_forward(
UpperCamelCase_ , current_layer=UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ )
UpperCamelCase_ =self.pooler(UpperCamelCase_ )
UpperCamelCase_ =output_layers[i](UpperCamelCase_ )
if regression:
UpperCamelCase_ =logits.detach()
if patient_result is not None:
UpperCamelCase_ =patient_result.detach()
if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold:
patient_counter += 1
else:
UpperCamelCase_ =0
else:
UpperCamelCase_ =logits.detach().argmax(dim=1 )
if patient_result is not None:
UpperCamelCase_ =patient_result.detach().argmax(dim=1 )
if (patient_result is not None) and torch.all(labels.eq(UpperCamelCase_ ) ):
patient_counter += 1
else:
UpperCamelCase_ =0
UpperCamelCase_ =logits
if patient_counter == self.patience:
break
UpperCamelCase_ =[patient_result]
self.inference_layers_num += calculated_layer_num
self.inference_instances_num += 1
return res
@add_start_docstrings(
"Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. " , UpperCAmelCase , )
class __lowerCAmelCase ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self: int , UpperCamelCase_: Tuple ):
super().__init__(UpperCamelCase_ )
UpperCamelCase_ =config.num_labels
UpperCamelCase_ =BertModelWithPabee(UpperCamelCase_ )
UpperCamelCase_ =nn.Dropout(config.hidden_dropout_prob )
UpperCamelCase_ =nn.ModuleList(
[nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] )
self.init_weights()
@add_start_docstrings_to_model_forward(UpperCamelCase_ )
def UpperCamelCase__ ( self: Union[str, Any] , UpperCamelCase_: int=None , UpperCamelCase_: Dict=None , UpperCamelCase_: str=None , UpperCamelCase_: str=None , UpperCamelCase_: Dict=None , UpperCamelCase_: Dict=None , UpperCamelCase_: int=None , ):
UpperCamelCase_ =self.bert(
input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , )
UpperCamelCase_ =(logits[-1],)
if labels is not None:
UpperCamelCase_ =None
UpperCamelCase_ =0
for ix, logits_item in enumerate(UpperCamelCase_ ):
if self.num_labels == 1:
# We are doing regression
UpperCamelCase_ =MSELoss()
UpperCamelCase_ =loss_fct(logits_item.view(-1 ) , labels.view(-1 ) )
else:
UpperCamelCase_ =CrossEntropyLoss()
UpperCamelCase_ =loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) )
if total_loss is None:
UpperCamelCase_ =loss
else:
total_loss += loss * (ix + 1)
total_weights += ix + 1
UpperCamelCase_ =(total_loss / total_weights,) + outputs
return outputs
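

# Minimal standalone sketch of the patience-based early-exit rule used in the
# inference branch above: stop once `patience` consecutive internal
# classifiers make the same prediction. The helper below is illustrative and
# not called by the classes in this module.
def patience_exit(layer_logits, patience):
    counter, prev = 0, None
    for i, logits in enumerate(layer_logits):
        pred = logits.argmax(dim=1)
        # consecutive agreement increments the counter; disagreement resets it
        counter = counter + 1 if prev is not None and torch.all(pred.eq(prev)) else 0
        prev = pred
        if counter == patience:
            return i, logits  # exit early after layer i
    return len(layer_logits) - 1, layer_logits[-1]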
| 391 | 0 |
'''simple docstring'''
def decimal_to_fraction(decimal):
    """Return (numerator, denominator) for a decimal, reduced to lowest terms."""
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # Euclid's algorithm: the loop below computes gcd(numerator, denominator)
        dividend, divisor = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(f'{decimal_to_fraction(2) = }')
print(f'{decimal_to_fraction(89.0) = }')
print(f'{decimal_to_fraction("67") = }')
print(f'{decimal_to_fraction("45.0") = }')
print(f'{decimal_to_fraction(1.5) = }')
print(f'{decimal_to_fraction("6.25") = }')
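# Cross-check with the standard library (illustrative): fractions.Fraction
# performs the same decimal-to-reduced-fraction conversion.
from fractions import Fraction

print(f'{Fraction("6.25") = }')  # Fraction(25, 4)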
print(f'{decimal_to_fraction("78td") = }')  # raises ValueError: "78td" is not a valid number
| 399 |
'''simple docstring'''
from collections import deque
from .hash_table import HashTable
class HashTableWithLinkedList(HashTable):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        # Separate chaining: each slot holds a deque of values; new items go
        # to the front of the chain.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
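

# Illustrative sketch of the separate-chaining idea above, reduced to a plain
# dict of deques (the bucket count of 3 is an example only):
def _chaining_demo():
    table = {k: None for k in range(3)}

    def put(key, value):
        bucket = key % 3
        table[bucket] = deque() if table[bucket] is None else table[bucket]
        table[bucket].appendleft(value)

    put(0, "a")
    put(3, "b")  # both keys land in bucket 0
    return table[0]  # deque(['b', 'a'])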
| 399 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json',
}
# fmt: off
__magic_name__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1_058, 1_220, 1_267, 1_279, 1_303, 1_343, 1_377,
1_391, 1_635, 1_782, 1_875, 2_162, 2_361, 2_488, 3_467, 4_008, 4_211,
4_600, 4_808, 5_299, 5_855, 6_329, 7_203, 9_609, 9_959, 10_563, 10_786,
11_420, 11_709, 11_907, 13_163, 13_697, 13_700, 14_808, 15_306, 16_410, 16_791,
17_992, 19_203, 19_510, 20_724, 22_305, 22_935, 27_007, 30_109, 30_420, 33_409,
34_949, 40_283, 40_493, 40_549, 47_282, 49_146, 50_257, 50_359, 50_360, 50_361
]
__magic_name__ = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1_350, 1_853, 1_982, 2_460, 2_627,
3_246, 3_253, 3_268, 3_536, 3_846, 3_961, 4_183, 4_667, 6_585, 6_647,
7_273, 9_061, 9_383, 10_428, 10_929, 11_938, 12_033, 12_331, 12_562, 13_793,
14_157, 14_635, 15_265, 15_618, 16_553, 16_604, 18_362, 18_956, 20_075, 21_675,
22_520, 26_130, 26_161, 26_435, 28_279, 29_464, 31_650, 32_302, 32_470, 36_865,
42_863, 47_425, 49_870, 50_254, 50_258, 50_360, 50_361, 50_362
]
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """whisper"""
a_ = ["""past_key_values"""]
a_ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : str ,_a : Tuple=51865 ,_a : Dict=80 ,_a : int=6 ,_a : str=4 ,_a : Union[str, Any]=6 ,_a : Tuple=4 ,_a : str=1536 ,_a : int=1536 ,_a : Dict=0.0 ,_a : List[Any]=0.0 ,_a : List[str]=50257 ,_a : Tuple=True ,_a : Tuple=True ,_a : Tuple="gelu" ,_a : List[Any]=256 ,_a : Optional[int]=0.0 ,_a : List[Any]=0.0 ,_a : Optional[Any]=0.0 ,_a : str=0.02 ,_a : Optional[int]=False ,_a : List[str]=1500 ,_a : Tuple=448 ,_a : List[str]=50256 ,_a : Dict=50256 ,_a : Dict=50256 ,_a : Tuple=None ,_a : Any=[220, 50256] ,_a : Dict=False ,_a : Any=256 ,_a : Optional[int]=False ,_a : Optional[Any]=0.05 ,_a : Union[str, Any]=10 ,_a : List[str]=2 ,_a : str=0.0 ,_a : Dict=10 ,_a : Optional[Any]=0 ,_a : Any=7 ,**_a : Tuple ,):
'''simple docstring'''
A_ : List[Any] = vocab_size
A_ : int = num_mel_bins
A_ : Optional[int] = d_model
A_ : List[str] = encoder_layers
A_ : Optional[Any] = encoder_attention_heads
A_ : List[Any] = decoder_layers
A_ : Optional[Any] = decoder_attention_heads
A_ : List[Any] = decoder_ffn_dim
A_ : Union[str, Any] = encoder_ffn_dim
A_ : Optional[Any] = dropout
A_ : Union[str, Any] = attention_dropout
A_ : Any = activation_dropout
A_ : Optional[Any] = activation_function
A_ : List[str] = init_std
A_ : Dict = encoder_layerdrop
A_ : Dict = decoder_layerdrop
A_ : Any = use_cache
A_ : List[Any] = encoder_layers
A_ : Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
A_ : Tuple = max_source_positions
A_ : Dict = max_target_positions
# Audio Classification-specific parameters. Feel free to ignore for other classes.
A_ : List[str] = classifier_proj_size
A_ : Tuple = use_weighted_layer_sum
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
A_ : Tuple = apply_spec_augment
A_ : Union[str, Any] = mask_time_prob
A_ : Optional[int] = mask_time_length
A_ : int = mask_time_min_masks
A_ : Optional[int] = mask_feature_prob
A_ : Optional[int] = mask_feature_length
A_ : Any = mask_feature_min_masks
A_ : Any = median_filter_width
super().__init__(
pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,is_encoder_decoder=_a ,decoder_start_token_id=_a ,suppress_tokens=_a ,begin_suppress_tokens=_a ,**_a ,)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _a ( self : List[str] ):
'''simple docstring'''
A_ : List[str] = OrderedDict(
[
("""input_features""", {0: """batch""", 1: """feature_size""", 2: """encoder_sequence"""}),
] )
if self.use_past:
A_ : Optional[int] = {0: """batch"""}
else:
A_ : Optional[int] = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(_a ,direction="""inputs""" )
return common_inputs
def _a ( self : List[str] ,_a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,_a : int = -1 ,_a : int = -1 ,_a : bool = False ,_a : Optional["TensorType"] = None ,_a : int = 22050 ,_a : float = 5.0 ,_a : int = 220 ,):
'''simple docstring'''
A_ : Tuple = OrderedDict()
A_ : List[str] = OnnxConfig.generate_dummy_inputs(
self ,preprocessor=preprocessor.feature_extractor ,batch_size=_a ,framework=_a ,sampling_rate=_a ,time_duration=_a ,frequency=_a ,)
A_ : List[Any] = encoder_inputs["""input_features"""].shape[2]
A_ : Optional[Any] = encoder_sequence_length // 2 if self.use_past else seq_length
A_ : Any = super().generate_dummy_inputs(
preprocessor.tokenizer ,_a ,_a ,_a ,_a )
A_ : Any = encoder_inputs.pop("""input_features""" )
A_ : Union[str, Any] = decoder_inputs.pop("""decoder_input_ids""" )
if "past_key_values" in decoder_inputs:
A_ : Any = decoder_inputs.pop("""past_key_values""" )
return dummy_inputs
@property
def _a ( self : Dict ):
'''simple docstring'''
return 1e-3
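

# Usage sketch (illustrative, never called): upstream the first class above is
# `WhisperConfig`; a tiny config gives a small random model for smoke tests.
def _tiny_whisper_example():
    from transformers import WhisperConfig, WhisperForConditionalGeneration

    config = WhisperConfig(
        encoder_layers=2, decoder_layers=2, d_model=128,
        encoder_attention_heads=4, decoder_attention_heads=4,
        encoder_ffn_dim=256, decoder_ffn_dim=256,
    )
    return WhisperForConditionalGeneration(config).num_parameters()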
| 665 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
__magic_name__ = {
'vocab_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/vocab.json',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/vocab.json'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/vocab.json'
),
},
'merges_file': {
'allenai/longformer-base-4096': 'https://huggingface.co/allenai/longformer-base-4096/resolve/main/merges.txt',
'allenai/longformer-large-4096': (
'https://huggingface.co/allenai/longformer-large-4096/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-finetuned-triviaqa': (
'https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/merges.txt'
),
'allenai/longformer-base-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
'allenai/longformer-large-4096-extra.pos.embd.only': (
'https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/merges.txt'
),
},
}
__magic_name__ = {
'allenai/longformer-base-4096': 4_096,
'allenai/longformer-large-4096': 4_096,
'allenai/longformer-large-4096-finetuned-triviaqa': 4_096,
'allenai/longformer-base-4096-extra.pos.embd.only': 4_096,
'allenai/longformer-large-4096-extra.pos.embd.only': 4_096,
}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    """
    Returns a mapping from utf-8 byte values to printable unicode strings,
    so BPE merge tables never have to deal with whitespace/control characters.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self : int ,_a : Tuple ,_a : Union[str, Any] ,_a : Optional[Any]="replace" ,_a : Union[str, Any]="<s>" ,_a : Union[str, Any]="</s>" ,_a : int="</s>" ,_a : List[str]="<s>" ,_a : List[Any]="<unk>" ,_a : Any="<pad>" ,_a : Dict="<mask>" ,_a : Optional[int]=False ,**_a : List[Any] ,):
'''simple docstring'''
A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token
A_ : Optional[int] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token
A_ : int = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
A_ : Any = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
super().__init__(
errors=_a ,bos_token=_a ,eos_token=_a ,unk_token=_a ,sep_token=_a ,cls_token=_a ,pad_token=_a ,mask_token=_a ,add_prefix_space=_a ,**_a ,)
with open(_a ,encoding="""utf-8""" ) as vocab_handle:
A_ : str = json.load(_a )
A_ : Optional[int] = {v: k for k, v in self.encoder.items()}
A_ : List[str] = errors # how to handle errors in decoding
A_ : List[str] = bytes_to_unicode()
A_ : str = {v: k for k, v in self.byte_encoder.items()}
with open(_a ,encoding="""utf-8""" ) as merges_handle:
A_ : Any = merges_handle.read().split("""\n""" )[1:-1]
A_ : str = [tuple(merge.split() ) for merge in bpe_merges]
A_ : int = dict(zip(_a ,range(len(_a ) ) ) )
A_ : List[Any] = {}
A_ : Optional[int] = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
A_ : Optional[Any] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def _a ( self : Any ):
'''simple docstring'''
return len(self.encoder )
def _a ( self : str ):
'''simple docstring'''
return dict(self.encoder ,**self.added_tokens_encoder )
def _a ( self : int ,_a : int ):
'''simple docstring'''
if token in self.cache:
return self.cache[token]
A_ : Optional[int] = tuple(_a )
A_ : Any = get_pairs(_a )
if not pairs:
return token
while True:
A_ : Optional[Any] = min(_a ,key=lambda _a : self.bpe_ranks.get(_a ,float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
A_ , A_ : Dict = bigram
A_ : int = []
A_ : Optional[Any] = 0
while i < len(_a ):
try:
A_ : List[str] = word.index(_a ,_a )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A_ : Tuple = j
if word[i] == first and i < len(_a ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A_ : str = tuple(_a )
A_ : str = new_word
if len(_a ) == 1:
break
else:
A_ : int = get_pairs(_a )
A_ : Optional[int] = """ """.join(_a )
A_ : List[str] = word
return word
def _a ( self : Dict ,_a : Optional[int] ):
'''simple docstring'''
A_ : Any = []
for token in re.findall(self.pat ,_a ):
A_ : Any = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_a ).split(""" """ ) )
return bpe_tokens
def _a ( self : Union[str, Any] ,_a : Optional[int] ):
'''simple docstring'''
return self.encoder.get(_a ,self.encoder.get(self.unk_token ) )
def _a ( self : int ,_a : Dict ):
'''simple docstring'''
return self.decoder.get(_a )
def _a ( self : Optional[int] ,_a : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = """""".join(_a )
A_ : Dict = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" ,errors=self.errors )
return text
def _a ( self : int ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
A_ : int = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_a ,"""w""" ,encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=_a ,ensure_ascii=_a ) + """\n""" )
A_ : int = 0
with open(_a ,"""w""" ,encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda _a : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
""" Please check that the tokenizer is not corrupted!""" )
A_ : Dict = token_index
writer.write(""" """.join(_a ) + """\n""" )
index += 1
return vocab_file, merge_file
def _a ( self : List[str] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ : int = [self.cls_token_id]
A_ : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _a ( self : int ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def _a ( self : Any ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Union[str, Any] = [self.sep_token_id]
A_ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _a ( self : str ,_a : Optional[int] ,_a : Union[str, Any]=False ,**_a : Dict ):
'''simple docstring'''
A_ : Any = kwargs.pop("""add_prefix_space""" ,self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_a ) > 0 and not text[0].isspace()):
A_ : Optional[int] = """ """ + text
return (text, kwargs)
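

# Minimal standalone sketch of the BPE merge loop implemented in `bpe` above,
# run against a toy merge table (the ranks below are illustrative). It is not
# called by the tokenizer; it only demonstrates the lowest-rank-first merging.
def _toy_bpe(word, ranks):
    word = tuple(word)
    while len(word) > 1:
        pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
        best = min(pairs, key=lambda p: ranks.get(p, float("inf")))
        if best not in ranks:
            break  # no mergeable pair left
        first, second = best
        merged, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == best:
                merged.append(first + second)
                i += 2
            else:
                merged.append(word[i])
                i += 1
        word = tuple(merged)
    return " ".join(word)


# _toy_bpe("lower", {("l", "o"): 0, ("lo", "w"): 1})  -> "low e r"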
| 665 | 1 |
'''simple docstring'''
def solution(length: int = 50) -> int:
    # Project Euler 114: count the ways to fill a row of `length` units with
    # red blocks of length >= 3 separated by at least one black square.
    ways_number = [1] * (length + 1)
for row_length in range(3 , length + 1 ):
for block_length in range(3 , row_length + 1 ):
for block_start in range(row_length - block_length ):
ways_number[row_length] += ways_number[
row_length - block_start - block_length - 1
]
ways_number[row_length] += 1
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
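
# Sanity check (illustrative): Project Euler 114 states there are exactly
# seventeen arrangements for a row measuring seven units.
assert solution(7) == 17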
| 701 |
'''simple docstring'''
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def make_lowpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    # Standard biquad low-pass (RBJ audio-EQ cookbook coefficients)
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 - _cos) / 2
    b1 = 1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_highpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = (1 + _cos) / 2
    b1 = -1 - _cos

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b0])
    return filt


def make_bandpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = _sin / 2
    b1 = 0
    b2 = -b0

    a0 = 1 + alpha
    a1 = -2 * _cos
    a2 = 1 - alpha

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_allpass(frequency: int, samplerate: int, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)

    b0 = 1 - alpha
    b1 = -2 * _cos
    b2 = 1 + alpha

    filt = IIRFilter(2)
    filt.set_coefficients([b2, b1, b0], [b0, b1, b2])
    return filt


def make_peak(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)

    b0 = 1 + alpha * big_a
    b1 = -2 * _cos
    b2 = 1 - alpha * big_a
    a0 = 1 + alpha / big_a
    a1 = -2 * _cos
    a2 = 1 - alpha / big_a

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_lowshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (pmc + aa2)
    b1 = 2 * big_a * mpc
    b2 = big_a * (pmc - aa2)
    a0 = ppmc + aa2
    a1 = -2 * pmpc
    a2 = ppmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt


def make_highshelf(frequency: int, samplerate: int, gain_db: float, q_factor: float = 1 / sqrt(2)) -> IIRFilter:
    w0 = tau * frequency / samplerate
    _sin = sin(w0)
    _cos = cos(w0)
    alpha = _sin / (2 * q_factor)
    big_a = 10 ** (gain_db / 40)
    pmc = (big_a + 1) - (big_a - 1) * _cos
    ppmc = (big_a + 1) + (big_a - 1) * _cos
    mpc = (big_a - 1) - (big_a + 1) * _cos
    pmpc = (big_a - 1) + (big_a + 1) * _cos
    aa2 = 2 * sqrt(big_a) * alpha

    b0 = big_a * (ppmc + aa2)
    b1 = -2 * big_a * pmpc
    b2 = big_a * (ppmc - aa2)
    a0 = pmc + aa2
    a1 = 2 * mpc
    a2 = pmc - aa2

    filt = IIRFilter(2)
    filt.set_coefficients([a0, a1, a2], [b0, b1, b2])
    return filt
return filt
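# Usage sketch (added for illustration; not part of the original file). It assumes
# the IIRFilter class from audio_filters.iir_filter exposes a per-sample
# `process(sample: float) -> float` method, as in TheAlgorithms implementation.
if __name__ == "__main__":
    hpf = make_highpass(frequency=1_000, samplerate=48_000)
    samples = [0.0, 1.0, 0.5, -0.5, -1.0]
    print([hpf.process(sample) for sample in samples])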
| 695 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}
class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
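# Usage sketch (added for illustration; not part of the original file):
# instantiating the default configuration and reading a few fields. Pairing it
# with a randomly initialized model would be `LiltModel(LiltConfig())`.
if __name__ == "__main__":
    config = LiltConfig()
    print(config.model_type, config.hidden_size, config.max_2d_position_embeddings)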
| 410 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=1,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a cat and a frog",
            "token_indices": [2, 5],
            "generator": generator,
            "num_inference_steps": 1,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "max_iter_to_alter": 2,
            "thresholds": {0: 0.7},
        }
        return inputs

    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class StableDiffusionAttendAndExcitePipelineIntegrationTests(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt,
            token_indices=token_indices,
            guidance_scale=7.5,
            generator=generator,
            num_inference_steps=5,
            max_iter_to_alter=5,
            output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 410 | 1 |
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
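# Example invocation (illustrative prompt and paths, not from the original file):
#   python retrieve.py --class_prompt "photo of a dog" --class_data_dir ./real_reg/dog --num_class_images 200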
| 90 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, XLMRobertaTokenizer
from diffusers import AltDiffusionPipeline, AutoencoderKL, DDIMScheduler, PNDMScheduler, UNet2DConditionModel
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class AltDiffusionPipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = AltDiffusionPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        # TODO: address the non-deterministic text encoder (fails for save-load tests)
        # torch.manual_seed(0)
        # text_encoder_config = RobertaSeriesConfig(
        #     hidden_size=32,
        #     project_dim=32,
        #     intermediate_size=37,
        #     layer_norm_eps=1e-05,
        #     num_attention_heads=4,
        #     num_hidden_layers=5,
        #     vocab_size=5002,
        # )
        # text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            projection_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5002,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_alt_diffusion_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["prompt"] = "A photo of an astronaut"
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.5748162, 0.60447145, 0.48821217, 0.50100636, 0.5431185, 0.45763683, 0.49657696, 0.48132733, 0.47573093]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_pndm(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        text_encoder_config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            vocab_size=5002,
        )
        # TODO: remove after fixing the non-deterministic text encoder
        text_encoder = RobertaSeriesModelWithTransformation(text_encoder_config)
        components["text_encoder"] = text_encoder
        alt_pipe = AltDiffusionPipeline(**components)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = alt_pipe(**inputs)
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.51605093, 0.5707241, 0.47365507, 0.50578886, 0.5633877, 0.4642503, 0.5182081, 0.48763484, 0.49084237]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class AltDiffusionPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_alt_diffusion(self):
        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, guidance_scale=6.0, num_inference_steps=20, output_type="np")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1010, 0.0800, 0.0794, 0.0885, 0.0843, 0.0762, 0.0769, 0.0729, 0.0586])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_alt_diffusion_fast_ddim(self):
        scheduler = DDIMScheduler.from_pretrained("BAAI/AltDiffusion", subfolder="scheduler")

        alt_pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion", scheduler=scheduler, safety_checker=None)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        output = alt_pipe([prompt], generator=generator, num_inference_steps=2, output_type="numpy")
        image = output.images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4019, 0.4052, 0.3810, 0.4119, 0.3916, 0.3982, 0.4651, 0.4195, 0.5323])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
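# Usage sketch (added for illustration; not part of the original test file): the
# slow tests above correspond to this minimal inference flow with the public
# BAAI/AltDiffusion checkpoint.
#   pipe = AltDiffusionPipeline.from_pretrained("BAAI/AltDiffusion")
#   image = pipe("A painting of a squirrel eating a burger").images[0]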
| 90 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spm_char.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "microsoft/speecht5_asr": "https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model",
        "microsoft/speecht5_tts": "https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model",
        "microsoft/speecht5_vc": "https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "microsoft/speecht5_asr": 1024,
    "microsoft/speecht5_tts": 1024,
    "microsoft/speecht5_vc": 1024,
}


class SpeechT5Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
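# Usage sketch (added for illustration; not part of the original file):
# "microsoft/speecht5_asr" is a public checkpoint whose repo contains spm_char.model.
if __name__ == "__main__":
    tokenizer = SpeechT5Tokenizer.from_pretrained("microsoft/speecht5_asr")
    print(tokenizer("hello world").input_ids)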
| 684 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested Python list."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=10,
        hop_length=160,
        chunk_length=8,
        padding_value=0.0,
        sampling_rate=4_000,
        return_attention_mask=False,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))

    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
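# Usage sketch (added for illustration; not part of the original file): the
# extractor pads/truncates raw audio to 30 s of samples and returns (80, 3000)
# log-mel features, matching the shape asserted in test_integration above.
#   fe = WhisperFeatureExtractor()
#   feats = fe([0.0] * 16_000, sampling_rate=16_000, return_tensors="np").input_features
#   assert feats.shape == (1, 80, 3000)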
| 684 | 1 |
from abc import ABC, abstractmethod
from typing import Optional, Union
from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike
class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
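# Minimal sketch (added for illustration; not part of the original file) of how a
# concrete reader can plug into AbstractDatasetReader. Real implementations in
# `datasets` (e.g. the CSV/JSON readers) dispatch to an Arrow-based builder instead.
class InMemoryTextReader(AbstractDatasetReader):
    def read(self) -> Dataset:
        with open(self.path_or_paths, encoding="utf-8") as f:
            lines = [line.rstrip("\n") for line in f]
        return Dataset.from_dict({"text": lines})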
| 717 |
import inspect
import unittest
from transformers import MobileNetV2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model
    from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import MobileNetV2ImageProcessor
class MobileNetV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV2Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.last_hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileNetV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetV2Model, MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetV2Model,
            "image-classification": MobileNetV2ForImageClassification,
            "image-segmentation": MobileNetV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV2ModelTester(self)
        self.config_tester = MobileNetV2ConfigTester(self, config_class=MobileNetV2Config, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetV2ImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
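# Usage sketch (added for illustration; not part of the original file): the
# integration tests above boil down to this inference flow with the public
# google/mobilenet_v2_1.0_224 checkpoint.
#   processor = MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
#   model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224")
#   logits = model(**processor(images=prepare_img(), return_tensors="pt")).logits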
| 437 | 0 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")


@require_sentencepiece
@require_tokenizers
class XGLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XGLMTokenizer
    rust_tokenizer_class = XGLMTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(len(vocab_keys), 1008)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1008)

    def test_full_tokenizer(self):
        tokenizer = XGLMTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "9",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "é",
                ".",
            ],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
                SPIECE_UNDERLINE + "I",
                SPIECE_UNDERLINE + "was",
                SPIECE_UNDERLINE + "b",
                "or",
                "n",
                SPIECE_UNDERLINE + "in",
                SPIECE_UNDERLINE + "",
                "<unk>",
                "2",
                "0",
                "0",
                "0",
                ",",
                SPIECE_UNDERLINE + "and",
                SPIECE_UNDERLINE + "this",
                SPIECE_UNDERLINE + "is",
                SPIECE_UNDERLINE + "f",
                "al",
                "s",
                "<unk>",
                ".",
            ],
        )

    @cached_property
    def big_tokenizer(self):
        return XGLMTokenizer.from_pretrained("facebook/xglm-564M")

    def test_picklable_without_disk(self):
        with tempfile.NamedTemporaryFile() as f:
            shutil.copyfile(SAMPLE_VOCAB, f.name)
            tokenizer = XGLMTokenizer(f.name, keep_accents=True)
            pickled_tokenizer = pickle.dumps(tokenizer)
        pickle.loads(pickled_tokenizer)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenization_base_easy_symbols(self):
        symbols = "Hello World!"
        original_tokenizer_encodings = [2, 31227, 4447, 35]
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenization_base_hard_symbols(self):
        symbols = (
            'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . Also we will'
            " add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth"
        )
        # fmt: off
        original_tokenizer_encodings = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
        # fmt: on
        self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols))

    @slow
    def test_tokenizer_integration(self):
# fmt: off
        expected_encoding = {
"input_ids": [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding, model_name="facebook/xglm-564M", padding=False
        )
| 81 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
"tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
"BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BioGptForCausalLM",
"BioGptForTokenClassification",
"BioGptForSequenceClassification",
"BioGptModel",
"BioGptPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 405 | 0 |
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def bfs(graph: list, s: int, t: int, parent: list) -> bool:
    """Return True if there is an augmenting path from s to t in the residual graph."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph: list, source: int, sink: int) -> list:
    """Edmonds-Karp: saturate augmenting paths, then return the edges whose capacity was exhausted."""
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original cut, copy.
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum value in select path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))

    return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
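# Illustrative companion note (not part of the original module): by the
# max-flow/min-cut theorem, the `max_flow` accumulated inside mincut() equals
# the total original capacity of the returned cut edges. The matrix above
# appears to be the classic CLRS flow-network example, whose maximum flow is 23:
#
#   cut = mincut([row[:] for row in test_graph], source=0, sink=5)
#   assert sum(test_graph[i][j] for i, j in cut) == 23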
| 21 |
"""simple docstring"""
# Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################################
#
# Note: If when running this conversion script you're getting an exception:
# ModuleNotFoundError: No module named 'megatron.model.enums'
# you need to tell python where to find the clone of Megatron-LM, e.g.:
#
# cd /tmp
# git clone https://github.com/NVIDIA/Megatron-LM
# PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_gpt2/convert_megatron_gpt2_checkpoint.py ...
#
# if you already have it cloned elsewhere, simply adjust the path to the existing path
#
# If the training was done using a Megatron-LM fork, e.g.,
# https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one
# in your path, i.e., /path/to/Megatron-DeepSpeed/
#
import argparse
import os
import re
import zipfile
import torch
from transformers import AutoTokenizer, GPT2Config
def recursive_print(name, val, spaces=0):
    # Format the message.
    if name is None:
        msg = None
    else:
        fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}"
        msg = fmt.format(name)

    # Print and recurse (if needed).
    if isinstance(val, dict):
        if msg is not None:
            print(msg)
        for k in val.keys():
            recursive_print(k, val[k], spaces + 2)
    elif isinstance(val, torch.Tensor):
        print(msg, ":", val.size())
    else:
        print(msg, ":", val)
def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size):
    input_shape = param.size()
    if checkpoint_version == 1.0:
        # version 1.0 stores [num_heads * hidden_size * num_splits, :]
        saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 2)
        param = param.transpose(1, 2).contiguous()
    elif checkpoint_version >= 2.0:
        # other versions store [num_heads * num_splits * hidden_size, :]
        saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:]
        param = param.view(*saved_shape)
        param = param.transpose(0, 1).contiguous()
    param = param.view(*input_shape)
    return param
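# Shape walk-through (illustrative, not part of the original script): for
# checkpoint_version >= 2.0 with num_splits=3 (fused QKV), a weight of shape
# [num_heads * 3 * head_dim, hidden] is viewed as [num_heads, 3, head_dim, hidden],
# transposed to [3, num_heads, head_dim, hidden], and flattened back, so the rows
# become grouped as all-Q, then all-K, then all-V -- the layout transformers'
# GPT-2 attention expects.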
def convert_megatron_checkpoint(args, input_state_dict, config):
    # The converted output model.
    output_state_dict = {}

    # old versions did not store training args
    ds_args = input_state_dict.get("args", None)
    if ds_args is not None:
        # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint
        # from pprint import pprint
        # pprint(vars(ds_args))

        config.vocab_size = ds_args.padded_vocab_size
        config.n_positions = ds_args.max_position_embeddings
        config.n_embd = ds_args.hidden_size
        config.n_layer = ds_args.num_layers
        config.n_head = ds_args.num_attention_heads
        config.n_inner = ds_args.ffn_hidden_size
        # pprint(config)

    # The number of heads.
    heads = config.n_head
    # The hidden_size per head.
    hidden_size_per_head = config.n_embd // config.n_head
    # Megatron-LM checkpoint version
    if "checkpoint_version" in input_state_dict.keys():
        checkpoint_version = input_state_dict["checkpoint_version"]
    else:
        checkpoint_version = 0.0

    # The model.
    model = input_state_dict["model"]
    # The language model.
    lm = model["language_model"]
    # The embeddings.
    embeddings = lm["embedding"]

    # The word embeddings.
    word_embeddings = embeddings["word_embeddings"]["weight"]
    # Truncate the embedding table to vocab_size rows.
    word_embeddings = word_embeddings[: config.vocab_size, :]
    output_state_dict["transformer.wte.weight"] = word_embeddings

    # The position embeddings.
    pos_embeddings = embeddings["position_embeddings"]["weight"]
    # Read the causal mask dimension (seqlen). [max_sequence_length, hidden_size]
    n_positions = pos_embeddings.size(0)
    if n_positions != config.n_positions:
        raise ValueError(
            f"pos_embeddings.max_sequence_length={n_positions} and config.n_positions={config.n_positions} don't match"
        )
    # Store the position embeddings.
    output_state_dict["transformer.wpe.weight"] = pos_embeddings

    # The transformer.
    transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"]

    # The regex to extract layer names.
    layer_re = re.compile(r"layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)")

    # The simple map of names for "automated" rules.
    megatron_to_transformers = {
        "attention.dense": ".attn.c_proj.",
        "self_attention.dense": ".attn.c_proj.",
        "mlp.dense_h_to_4h": ".mlp.c_fc.",
        "mlp.dense_4h_to_h": ".mlp.c_proj.",
    }

    # Extract the layers.
    for key, val in transformer.items():
        # Match the name.
        m = layer_re.match(key)

        # Stop if that's not a layer
        if m is None:
            break

        # The index of the layer.
        layer_idx = int(m.group(1))
        # The name of the operation.
        op_name = m.group(2)
        # Is it a weight or a bias?
        weight_or_bias = m.group(3)

        # The name of the layer.
        layer_name = f"transformer.h.{layer_idx}"

        # For layernorm(s), simply store the layer norm.
        if op_name.endswith("layernorm"):
            ln_name = "ln_1" if op_name.startswith("input") else "ln_2"
            output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val

        # Transpose the QKV matrix.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "weight":
            # Insert a tensor of 1x1xDxD bias.
            causal_mask = torch.tril(torch.ones((n_positions, n_positions), dtype=torch.float16)).view(
                1, 1, n_positions, n_positions
            )
            output_state_dict[layer_name + ".attn.bias"] = causal_mask

            # Insert a "dummy" tensor for masked_bias.
            masked_bias = torch.tensor(-1e4, dtype=torch.float16)
            output_state_dict[layer_name + ".attn.masked_bias"] = masked_bias

            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Megatron stores (3*D) x D but transformers-GPT2 expects D x 3*D.
            out_val = out_val.transpose(0, 1).contiguous()
            # Store.
            output_state_dict[layer_name + ".attn.c_attn.weight"] = out_val

        # Transpose the bias.
        elif (
            op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value"
        ) and weight_or_bias == "bias":
            out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head)
            # Store. No change of shape.
            output_state_dict[layer_name + ".attn.c_attn.bias"] = out_val

        # Transpose the weights.
        elif weight_or_bias == "weight":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "weight"] = val.transpose(0, 1)

        # Copy the bias.
        elif weight_or_bias == "bias":
            out_name = megatron_to_transformers[op_name]
            output_state_dict[layer_name + out_name + "bias"] = val

    # DEBUG.
    assert config.n_layer == layer_idx + 1

    # The final layernorm.
    output_state_dict["transformer.ln_f.weight"] = transformer["final_layernorm.weight"]
    output_state_dict["transformer.ln_f.bias"] = transformer["final_layernorm.bias"]

    # For the LM head, transformers wants the weight matrix tied to the word embeddings.
    output_state_dict["lm_head.weight"] = word_embeddings

    # It should be done!
    return output_state_dict
def main():
    # Create the argument parser.
    parser = argparse.ArgumentParser()
    parser.add_argument("--print-checkpoint-structure", action="store_true")
    parser.add_argument(
        "path_to_checkpoint", type=str, help="Path to the checkpoint file (.zip archive or direct .pt file)",
    )
    parser.add_argument(
        "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model.",
    )
    args = parser.parse_args()

    # Extract the basename.
    basename = os.path.dirname(args.path_to_checkpoint)

    # Load the model.
    # the .zip is very optional, let's keep it for backward compatibility
    print(f"Extracting PyTorch state dictionary from {args.path_to_checkpoint}")
    if args.path_to_checkpoint.endswith(".zip"):
        with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint:
            with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict:
                input_state_dict = torch.load(pytorch_dict, map_location="cpu")
    else:
        input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu")

    ds_args = input_state_dict.get("args", None)

    # Read the config, or default to the model released by NVIDIA.
    if args.config_file == "":
        if ds_args is not None:
            if ds_args.bias_gelu_fusion:
                activation_function = "gelu_fast"
            elif ds_args.openai_gelu:
                activation_function = "gelu_new"
            else:
                activation_function = "gelu"
        else:
            # in the very early days this used to be "gelu_new"
            activation_function = "gelu_new"

        # Spell out all parameters in case the defaults change.
        config = GPT2Config(
            vocab_size=50257,
            n_positions=1024,
            n_embd=1024,
            n_layer=24,
            n_head=16,
            n_inner=4096,
            activation_function=activation_function,
            resid_pdrop=0.1,
            embd_pdrop=0.1,
            attn_pdrop=0.1,
            layer_norm_epsilon=1e-5,
            initializer_range=0.02,
            summary_type="cls_index",
            summary_use_proj=True,
            summary_activation=None,
            summary_proj_to_labels=True,
            summary_first_dropout=0.1,
            scale_attn_weights=True,
            use_cache=True,
            bos_token_id=50256,
            eos_token_id=50256,
        )
    else:
        config = GPT2Config.from_json_file(args.config_file)

    config.architectures = ["GPT2LMHeadModel"]

    # Convert.
    print("Converting")
    output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config)

    # Print the structure of converted state dict.
    if args.print_checkpoint_structure:
        recursive_print(None, output_state_dict)

    # Add tokenizer class info to config
    # see https://github.com/huggingface/transformers/issues/13906
    if ds_args is not None:
        tokenizer_type = ds_args.tokenizer_type
        if tokenizer_type == "GPT2BPETokenizer":
            tokenizer_model_name = "gpt2"
        elif tokenizer_type == "PretrainedFromHF":
            tokenizer_model_name = ds_args.tokenizer_name_or_path
        else:
            raise ValueError(f"Unrecognized tokenizer_type {tokenizer_type}")
    else:
        tokenizer_model_name = "gpt2"

    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name)
    tokenizer_class = type(tokenizer).__name__
    config.tokenizer_class = tokenizer_class

    # Store the config to file.
    print("Saving config")
    config.save_pretrained(basename)

    # Save tokenizer based on args
    print(f"Adding {tokenizer_class} tokenizer files")
    tokenizer.save_pretrained(basename)

    # Store the state_dict to file.
    output_checkpoint_file = os.path.join(basename, "pytorch_model.bin")
    print(f'Saving checkpoint to "{output_checkpoint_file}"')
    torch.save(output_state_dict, output_checkpoint_file)
####################################################################################################
if __name__ == "__main__":
main()
####################################################################################################
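# Example invocation (illustrative; the checkpoint path below is hypothetical):
#
#   python convert_megatron_gpt2_checkpoint.py --print-checkpoint-structure \
#       /path/to/megatron/checkpoint.zip
#
# The converted config, tokenizer files and pytorch_model.bin are written next
# to the input checkpoint.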
| 21 | 1 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples

    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )

    @require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question="How many cats are there?", top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )

    @slow
    @require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"

        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )

        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )

    @require_tf
    @unittest.skip("Visual question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
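# Usage sketch (illustrative, not part of the test module):
#
#   from transformers import pipeline
#   vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
#   vqa(image="./tests/fixtures/tests_samples/COCO/000000039769.png",
#       question="How many cats are there?", top_k=2)
#   # -> [{"score": ..., "answer": "2"}, {"score": ..., "answer": "1"}]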
| 301 |
'''simple docstring'''
def is_pentagonal(n: int) -> bool:
    """A number n is pentagonal iff (1 + sqrt(1 + 24n)) / 6 is a positive integer."""
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0


def solution(limit: int = 5000) -> int:
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
print(f'{solution() = }')
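# Illustrative check (not in the original): is_pentagonal() inverts the closed
# form P(n) = n(3n - 1)/2 by solving 3n^2 - n - 2x = 0 with the quadratic
# formula; the positive root n = (1 + sqrt(1 + 24x)) / 6 must be an integer.
#
#   assert all(is_pentagonal(n * (3 * n - 1) // 2) for n in range(1, 1000))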
| 301 | 1 |
from ..utils import DummyObject, requires_backends
class _SCREAMING_SNAKE_CASE ( metaclass=_a ):
'''simple docstring'''
lowercase_ = ["""flax"""]
def __init__(self : Optional[int] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Dict) ->Dict:
'''simple docstring'''
requires_backends(self , ["flax"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Dict , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Dict) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : int , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Dict) ->Any:
'''simple docstring'''
requires_backends(cls , ["flax"])
class _SCREAMING_SNAKE_CASE ( metaclass=_a ):
'''simple docstring'''
lowercase_ = ["""flax"""]
def __init__(self : List[Any] , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Dict) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["flax"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Optional[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Union[str, Any]) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : List[str] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : Optional[Any]) ->int:
'''simple docstring'''
requires_backends(cls , ["flax"])
class _SCREAMING_SNAKE_CASE ( metaclass=_a ):
'''simple docstring'''
lowercase_ = ["""flax"""]
def __init__(self : int , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[str]) ->Optional[int]:
'''simple docstring'''
requires_backends(self , ["flax"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Dict , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : List[Any]) ->List[str]:
'''simple docstring'''
requires_backends(cls , ["flax"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Optional[Any] , *UpperCAmelCase_ : List[Any] , **UpperCAmelCase_ : str) ->int:
'''simple docstring'''
requires_backends(cls , ["flax"])
class _SCREAMING_SNAKE_CASE ( metaclass=_a ):
'''simple docstring'''
lowercase_ = ["""flax"""]
def __init__(self : Optional[int] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : int) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , ["flax"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Union[str, Any]) ->List[str]:
'''simple docstring'''
requires_backends(cls , ["flax"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Optional[Any] , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Union[str, Any]) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"])
class _SCREAMING_SNAKE_CASE ( metaclass=_a ):
'''simple docstring'''
lowercase_ = ["""flax"""]
def __init__(self : Union[str, Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Union[str, Any]) ->List[str]:
'''simple docstring'''
requires_backends(self , ["flax"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[Any]) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Any , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Any) ->Optional[int]:
'''simple docstring'''
requires_backends(cls , ["flax"])
class _SCREAMING_SNAKE_CASE ( metaclass=_a ):
'''simple docstring'''
lowercase_ = ["""flax"""]
def __init__(self : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : str) ->Optional[Any]:
'''simple docstring'''
requires_backends(self , ["flax"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Union[str, Any] , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Optional[int]) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["flax"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Tuple , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Union[str, Any]) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"])
class _SCREAMING_SNAKE_CASE ( metaclass=_a ):
'''simple docstring'''
lowercase_ = ["""flax"""]
def __init__(self : str , *UpperCAmelCase_ : int , **UpperCAmelCase_ : Tuple) ->int:
'''simple docstring'''
requires_backends(self , ["flax"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : List[Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Union[str, Any]) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["flax"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Dict , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : List[str]) ->str:
'''simple docstring'''
requires_backends(cls , ["flax"])
class _SCREAMING_SNAKE_CASE ( metaclass=_a ):
'''simple docstring'''
lowercase_ = ["""flax"""]
def __init__(self : Union[str, Any] , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : List[str]) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["flax"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Optional[Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[str]) ->Any:
'''simple docstring'''
requires_backends(cls , ["flax"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Dict , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : List[Any]) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["flax"])
class _SCREAMING_SNAKE_CASE ( metaclass=_a ):
'''simple docstring'''
lowercase_ = ["""flax"""]
def __init__(self : int , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : Any) ->Dict:
'''simple docstring'''
requires_backends(self , ["flax"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : List[str] , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : str) ->str:
'''simple docstring'''
requires_backends(cls , ["flax"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : str , *UpperCAmelCase_ : Optional[int] , **UpperCAmelCase_ : Any) ->Dict:
'''simple docstring'''
requires_backends(cls , ["flax"])
class _SCREAMING_SNAKE_CASE ( metaclass=_a ):
'''simple docstring'''
lowercase_ = ["""flax"""]
def __init__(self : Union[str, Any] , *UpperCAmelCase_ : Optional[Any] , **UpperCAmelCase_ : Tuple) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["flax"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : List[Any] , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Dict) ->Dict:
'''simple docstring'''
requires_backends(cls , ["flax"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Any , *UpperCAmelCase_ : Dict , **UpperCAmelCase_ : Union[str, Any]) ->Dict:
'''simple docstring'''
requires_backends(cls , ["flax"])
class _SCREAMING_SNAKE_CASE ( metaclass=_a ):
'''simple docstring'''
lowercase_ = ["""flax"""]
def __init__(self : Dict , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[Any]) ->str:
'''simple docstring'''
requires_backends(self , ["flax"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : str , *UpperCAmelCase_ : str , **UpperCAmelCase_ : Dict) ->str:
'''simple docstring'''
requires_backends(cls , ["flax"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : str , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Optional[int]) ->Optional[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"])
class _SCREAMING_SNAKE_CASE ( metaclass=_a ):
'''simple docstring'''
lowercase_ = ["""flax"""]
def __init__(self : int , *UpperCAmelCase_ : Tuple , **UpperCAmelCase_ : int) ->Optional[int]:
'''simple docstring'''
requires_backends(self , ["flax"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : int , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Tuple) ->Dict:
'''simple docstring'''
requires_backends(cls , ["flax"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : str , *UpperCAmelCase_ : List[str] , **UpperCAmelCase_ : Union[str, Any]) ->Tuple:
'''simple docstring'''
requires_backends(cls , ["flax"])
class _SCREAMING_SNAKE_CASE ( metaclass=_a ):
'''simple docstring'''
lowercase_ = ["""flax"""]
def __init__(self : Any , *UpperCAmelCase_ : Union[str, Any] , **UpperCAmelCase_ : Optional[int]) ->Union[str, Any]:
'''simple docstring'''
requires_backends(self , ["flax"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : int , *UpperCAmelCase_ : str , **UpperCAmelCase_ : List[Any]) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"])
@classmethod
def SCREAMING_SNAKE_CASE_ (cls : Dict , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : Dict) ->List[Any]:
'''simple docstring'''
requires_backends(cls , ["flax"])
| 720 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "salesforce/blip2-opt-2.7b": "https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json",
}
class Blip2VisionConfig(PretrainedConfig):
    model_type = "blip_2_vision_model"

    def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16,
                 image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=0.00001, attention_dropout=0.0,
                 initializer_range=1e-10, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2QFormerConfig(PretrainedConfig):
    model_type = "blip_2_qformer"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
                 intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02,
                 layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute",
                 cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from Blip2Config
        if config_dict.get("model_type") == "blip-2":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class Blip2Config(PretrainedConfig):
    model_type = "blip-2"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the Blip2VisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the Blip2QFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = Blip2VisionConfig(**vision_config)
        self.qformer_config = Blip2QFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02

    @classmethod
    def from_vision_qformer_text_configs(
        cls,
        vision_config: Blip2VisionConfig,
        qformer_config: Blip2QFormerConfig,
        text_config: PretrainedConfig,
        **kwargs,
    ):
        return cls(
            vision_config=vision_config.to_dict(),
            qformer_config=qformer_config.to_dict(),
            text_config=text_config.to_dict(),
            **kwargs,
        )

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
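# Usage sketch (illustrative, not part of the module): composing a Blip2Config
# from default sub-configs, mirroring `from_vision_qformer_text_configs`.
#
#   from transformers import OPTConfig
#   config = Blip2Config.from_vision_qformer_text_configs(
#       Blip2VisionConfig(), Blip2QFormerConfig(), OPTConfig()
#   )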
| 437 | 0 |
'''simple docstring'''
import math
import sys
def minimum_squares_to_represent_a_number(number: int) -> int:
    """Count the minimum number of perfect squares that sum to `number` (dynamic programming)."""
    if number != int(number):
        raise ValueError("the value of input must be a natural number")
    if number < 0:
        raise ValueError("the value of input must not be a negative number")
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(current_answer, answer)
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod()
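# Worked example (illustrative, not part of the original): Lagrange's
# four-square theorem guarantees the answer is always <= 4 for natural numbers.
#
#   minimum_squares_to_represent_a_number(12)  # -> 3, since 12 = 4 + 4 + 4
#   minimum_squares_to_represent_a_number(13)  # -> 2, since 13 = 4 + 9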
| 161 |
"""simple docstring"""
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
_has_s3fs = importlib.util.find_spec("s3fs") is not None

if _has_s3fs:
    from .s3filesystem import S3FileSystem  # noqa: F401

COMPRESSION_FILESYSTEMS: List[type] = [
    compression.Bz2FileSystem,
    compression.GzipFileSystem,
    compression.Lz4FileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
warnings.warn(F'''A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.''')
fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def extract_path_from_uri(dataset_path: str) -> str:
    """Strip a protocol prefix such as "s3://" from `dataset_path`, if present."""
    if "://" in dataset_path:
        dataset_path = dataset_path.split("://")[1]
    return dataset_path
def is_remote_filesystem(fs: fsspec.AbstractFileSystem) -> bool:
    """Check whether `fs` is a remote filesystem (anything other than the local "file" protocol)."""
    if fs is not None and fs.protocol != "file":
        return True
    else:
        return False
def rename(fs: fsspec.AbstractFileSystem, src: str, dst: str) -> None:
    is_local = not is_remote_filesystem(fs)
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src), fs._strip_protocol(dst))
    else:
        fs.mv(src, dst, recursive=True)
def _reset_fsspec_lock() -> None:
    """Clear the reference to the loop and thread so new ones can be created in a forked child process."""
    if hasattr(fsspec.asyn, "reset_lock"):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
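# Usage sketch (illustrative, not part of the module):
#
#   extract_path_from_uri("s3://my-bucket/train.parquet")  # -> "my-bucket/train.parquet"
#   extract_path_from_uri("relative/local/path")           # -> "relative/local/path"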
| 449 | 0 |
"""simple docstring"""
def gnome_sort(lst: list) -> list:
    """Gnome sort: walk the list, swapping out-of-order neighbours and stepping back after each swap."""
    if len(lst) <= 1:
        return lst

    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
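# Quick illustration (not in the original): gnome sort is O(n^2) in the worst
# case, like insertion sort, but uses a single loop with no nested scan.
#
#   gnome_sort([3, 1, 2])  # -> [1, 2, 3]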
| 258 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import Swin2SRConfig, Swin2SRForImageSuperResolution, Swin2SRImageProcessor
def get_config(checkpoint_url):
    config = Swin2SRConfig()

    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 126
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""

    return config
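# Usage sketch (illustrative): the checkpoint URL selects the architecture
# variant; anything not matched falls through to the Swin2SRConfig defaults
# (classical 2x super-resolution).
#
#   config = get_config(".../Swin2SR_Lightweight_X2_64.pth")
#   config.upsampler  # -> "pixelshuffledirect"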
def rename_key(name, config):
    if "patch_embed.proj" in name and "layers" not in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.patch_embeddings.layernorm")
    if "layers" in name:
        name = name.replace("layers", "encoder.stages")
    if "residual_group.blocks" in name:
        name = name.replace("residual_group.blocks", "layers")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "patch_embed.projection")

    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"

    if "conv_first" in name:
        name = name.replace("conv_first", "first_convolution")

    if (
        "upsample" in name
        or "conv_before_upsample" in name
        or "conv_bicubic" in name
        or "conv_up" in name
        or "conv_hr" in name
        or "conv_last" in name
        or "aux" in name
    ):
        # heads
        if "conv_last" in name:
            name = name.replace("conv_last", "final_convolution")
        if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
            if "conv_before_upsample.0" in name:
                name = name.replace("conv_before_upsample.0", "conv_before_upsample")
            if "upsample.0" in name:
                name = name.replace("upsample.0", "upsample.convolution_0")
            if "upsample.2" in name:
                name = name.replace("upsample.2", "upsample.convolution_1")
            name = "upsample." + name
        elif config.upsampler == "pixelshuffledirect":
            name = name.replace("upsample.0.weight", "upsample.conv.weight")
            name = name.replace("upsample.0.bias", "upsample.conv.bias")
        else:
            pass
    else:
        name = "swin2sr." + name

    return name
def convert_state_dict(orig_state_dict, config):
    # Note: the target key templates below are reconstructed from the rename
    # mapping above; the slicing of the fused QKV tensor is from the original.
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            key_split = key.split(".")
            stage_num = int(key_split[1])
            block_num = int(key_split[4])
            dim = config.embed_dim

            if "weight" in key:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"swin2sr.encoder.stages.{stage_num}.layers.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, config)] = val

    return orig_state_dict
def convert_swin2sr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    config = get_config(checkpoint_url)
    model = Swin2SRForImageSuperResolution(config)
    model.eval()

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)

    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"Unexpected key {key} in state_dict")

    # verify values
    url = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    processor = Swin2SRImageProcessor()
    # pixel_values = processor(image, return_tensors="pt").pixel_values

    image_size = 126 if "Jpeg" in checkpoint_url else 256
    transforms = Compose(
        [
            Resize((image_size, image_size)),
            ToTensor(),
            Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ]
    )
    pixel_values = transforms(image).unsqueeze(0)

    if config.num_channels == 1:
        pixel_values = pixel_values[:, 0, :, :].unsqueeze(1)

    outputs = model(pixel_values)
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
lowerCamelCase__ = torch.Size([1, 3, 512, 512] )
lowerCamelCase__ = torch.tensor(
[[-0.70_87, -0.71_38, -0.67_21], [-0.83_40, -0.80_95, -0.72_98], [-0.91_49, -0.84_14, -0.79_40]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
lowerCamelCase__ = torch.Size([1, 3, 1024, 1024] )
lowerCamelCase__ = torch.tensor(
[[-0.77_75, -0.81_05, -0.89_33], [-0.77_64, -0.83_56, -0.92_25], [-0.79_76, -0.86_86, -0.95_79]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
lowerCamelCase__ = torch.Size([1, 3, 1024, 1024] )
lowerCamelCase__ = torch.tensor(
[[-0.80_35, -0.75_04, -0.74_91], [-0.85_38, -0.81_24, -0.77_82], [-0.88_04, -0.86_51, -0.84_93]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
lowerCamelCase__ = torch.Size([1, 3, 512, 512] )
lowerCamelCase__ = torch.tensor(
[[-0.76_69, -0.86_62, -0.87_67], [-0.88_10, -0.99_62, -0.98_20], [-0.93_40, -1.03_22, -1.11_49]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
lowerCamelCase__ = torch.Size([1, 3, 1024, 1024] )
lowerCamelCase__ = torch.tensor(
[[-0.52_38, -0.55_57, -0.63_21], [-0.60_16, -0.59_03, -0.63_91], [-0.62_44, -0.63_34, -0.68_89]] )
assert (
outputs.reconstruction.shape == expected_shape
), f"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"""
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , __lowercase , atol=1e-3 )
print("""Looks ok!""" )
    url_to_name = {
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
"""swin2SR-classical-sr-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
"""swin2SR-classical-sr-x4-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
"""swin2SR-compressed-sr-x4-48"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
"""swin2SR-lightweight-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
"""swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
),
}
    model_name = url_to_name[checkpoint_url]
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving image processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub(f"caidas/{model_name}")
        processor.push_to_hub(f"caidas/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint_url""",
default="""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""",
type=str,
help="""URL of the original Swin2SR checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument("""--push_to_hub""", action="""store_true""", help="""Whether to push the converted model to the hub.""")
    args = parser.parse_args()
    convert_swin2sr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
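# Example invocation (illustrative; the script filename and output directory
# below are hypothetical):
#
#   python convert_swin2sr_original_to_pytorch.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2sr-classical-sr-x2-64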
| 258 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
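# Illustrative note (not part of the original): at runtime the module object is
# replaced by a _LazyModule, so `from ...encoder_decoder import EncoderDecoderModel`
# only imports the torch/TF/flax implementation when that attribute is first
# accessed, keeping base imports cheap.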
| 84 |
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module(module) -> None:
    """Disable gradient updates for all parameters of `module`."""
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image) -> None:
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
| 352 | 0
'''simple docstring'''
from numpy import exp, pi, sqrt
def gaussian(x: float, mu: float = 0.0, sigma: float = 1.0) -> float:
    """Probability density of the normal distribution N(mu, sigma^2) evaluated at x."""
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))
if __name__ == "__main__":
import doctest
doctest.testmod()
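# Quick check (illustrative, not in the original): the standard normal density
# at x = 0 is 1 / sqrt(2 * pi) ~= 0.3989.
#
#   round(gaussian(0), 4)  # -> 0.3989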
| 702 |
'''simple docstring'''
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
class LEDTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = LEDTokenizer
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(self, vocab_file=None, merges_file=None, tokenizer_file=None, errors="replace", bos_token="<s>",
                 eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>",
                 mask_token="<mask>", add_prefix_space=False, trim_offsets=True, **kwargs):
        super().__init__(vocab_file, merges_file, tokenizer_file=tokenizer_file, errors=errors, bos_token=bos_token,
                         eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token,
                         pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space,
                         trim_offsets=trim_offsets, **kwargs)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer, tokenizer_component, None)
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__())

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"])
            if "cls" in state:
                state["cls"] = tuple(state["cls"])

            changes_to_apply = False

            if state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True

            if state.get("trim_offsets", trim_offsets) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors, state.pop("type"))
                new_value = component_class(**state)
                setattr(self.backend_tokenizer, tokenizer_component, new_value)
    @property
    # Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
    def mask_token(self) -> str:
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet.")
            return None
        return str(self._mask_token)

    @mask_token.setter
    def mask_token(self, value):
        # Mask token behaves like a normal word, i.e. includes the space before it.
        value = AddedToken(value, lstrip=True, rstrip=False) if isinstance(value, str) else value
        self._mask_token = value

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
                "to use it with pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output

        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def _pad(
        self,
        encoded_inputs,
        max_length=None,
        padding_strategy=PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of=None,
        return_attention_mask=None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
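# Illustrative note (not part of the original): LED's `global_attention_mask`
# encodes 0 for local attention and 1 for global attention, so padded positions
# are filled with -1 above -- padding with 0 would silently mean "local
# attention" rather than "ignore this position".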
| 511 | 0 |
'''simple docstring'''
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR outputs 1 when both inputs are equal, otherwise 0."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
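# Illustrative note (not in the original): XNOR is the complement of XOR, so
# for a, b in {0, 1}:
#
#   xnor_gate(a, b) == 1 - (a ^ b)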
| 292 |
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images

        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(
            num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
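
# --- Illustration (not from the original file) ---
# End-to-end sketch of the pipeline under test; the tiny UNet mirrors the
# fixture above and two inference steps keep the run fast on CPU.
def _demo_score_sde_ve() -> None:
    unet = UNet2DModel(
        block_out_channels=(32, 64),
        layers_per_block=2,
        sample_size=32,
        in_channels=3,
        out_channels=3,
        down_block_types=("DownBlock2D", "AttnDownBlock2D"),
        up_block_types=("AttnUpBlock2D", "UpBlock2D"),
    )
    pipe = ScoreSdeVePipeline(unet=unet, scheduler=ScoreSdeVeScheduler())
    images = pipe(num_inference_steps=2, output_type="numpy").images
    assert images.shape == (1, 32, 32, 3)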
@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)
        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 228 | 0 |
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester:
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
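
# --- Illustration (not from the original file) ---
# What the processor does outside the test harness; the 18x18 target mirrors
# the tester defaults above. Imports are local since PIL/numpy are guarded.
def _demo_dpt_image_processor() -> None:
    import numpy as _np
    from PIL import Image as _Image

    processor = DPTImageProcessor(do_resize=True, size={"height": 18, "width": 18})
    image = _Image.fromarray(_np.random.randint(0, 255, (30, 40, 3), dtype=_np.uint8))
    pixel_values = processor(image, return_tensors="pt").pixel_values
    assert tuple(pixel_values.shape) == (1, 3, 18, 18)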
| 716 |
def solution(power: int = 1000) -> int:
    """Return the sum of the decimal digits of 2**power (Project Euler 16).

    >>> solution(3)
    8
    >>> solution(15)
    26
    """
    num = 2**power
    digit_sum = 0
    for digit in str(num):
        digit_sum += int(digit)
    return digit_sum


if __name__ == "__main__":
    power = int(input("Enter the power of 2: ").strip())
    print("2 ^", power, "=", 2**power)
    result = solution(power)
    print("Sum of the digits is:", result)
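
# --- Illustration (not from the original file) ---
# One-liner equivalent; for the classic input, the digits of 2**1000 sum to 1366.
assert solution(1000) == sum(map(int, str(2**1000))) == 1366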
| 592 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of random PIL images."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, CLIPImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, CLIPImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, CLIPImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)
        self.assertListEqual(list(inputs.keys()), processor.model_input_names)
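
# --- Illustration (not from the original file) ---
# End-to-end sketch of the processor tested above; the checkpoint name is an
# assumption for the example and requires network access.
def _demo_clip_processor() -> None:
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    image = Image.fromarray(np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8))
    inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt", padding=True)
    assert sorted(inputs.keys()) == ["attention_mask", "input_ids", "pixel_values"]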
| 71 |
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
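
# --- Illustration (not from the original file) ---
# The forward pass above keeps each transformer's *residual* (encoded - input),
# blends the two residuals with `mix_ratio`, then re-adds the input:
#     out = mix * r0 + (1 - mix) * r1 + x
# A tensor-only sanity check of that combination rule:
def _demo_mix_ratio() -> None:
    import torch

    x = torch.zeros(1, 4)
    r0 = torch.ones(1, 4)
    r1 = 3 * torch.ones(1, 4)
    out = 0.5 * r0 + (1 - 0.5) * r1 + x
    assert torch.equal(out, 2 * torch.ones(1, 4))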
| 71 | 1 |
"""simple docstring"""
def matching_min_vertex_cover(graph: dict) -> set:
    """Approximate a minimum vertex cover via a maximal matching: repeatedly
    pick an arbitrary remaining edge, add both of its endpoints to the cover,
    then discard every edge adjacent to either endpoint."""
    chosen_vertices = set()
    # edges = set of the graph's edges
    edges = get_edges(graph)

    # While there are still edges left, take an arbitrary edge
    # (from_node, to_node), add its endpoints to chosen_vertices, and then
    # remove all edges adjacent to from_node or to_node.
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return the set of (from_node, to_node) tuples of the adjacency-list graph."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 674 | """simple docstring"""
def solution(length: int = 50) -> int:
    """Count the ways to replace squares in a row of `length` black squares
    with runs of a single colour of length 2, 3 or 4, counting each colour
    separately and requiring at least one coloured tile."""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])
if __name__ == "__main__":
print(F"{solution() = }")
| 674 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
from .tokenization_electra import ElectraTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_electra_fast import ElectraTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_electra import (
ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
ElectraForCausalLM,
ElectraForMaskedLM,
ElectraForMultipleChoice,
ElectraForPreTraining,
ElectraForQuestionAnswering,
ElectraForSequenceClassification,
ElectraForTokenClassification,
ElectraModel,
ElectraPreTrainedModel,
load_tf_weights_in_electra,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_electra import (
TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFElectraForMaskedLM,
TFElectraForMultipleChoice,
TFElectraForPreTraining,
TFElectraForQuestionAnswering,
TFElectraForSequenceClassification,
TFElectraForTokenClassification,
TFElectraModel,
TFElectraPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_electra import (
FlaxElectraForCausalLM,
FlaxElectraForMaskedLM,
FlaxElectraForMultipleChoice,
FlaxElectraForPreTraining,
FlaxElectraForQuestionAnswering,
FlaxElectraForSequenceClassification,
FlaxElectraForTokenClassification,
FlaxElectraModel,
FlaxElectraPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 414 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase_ : Any = {
'''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vqa-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''',
'''uclanlp/visualbert-vcr-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'''
),
'''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''',
'''uclanlp/visualbert-nlvr2-coco-pre''': (
'''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'''
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class VisualBertConfig(PretrainedConfig):
    model_type = "visual_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        visual_embedding_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        bypass_transformer=False,
        special_visual_initialize=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.visual_embedding_dim = visual_embedding_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.bypass_transformer = bypass_transformer
        self.special_visual_initialize = special_visual_initialize
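
# --- Illustration (not from the original file) ---
# Instantiating the config with one non-default value:
def _demo_visual_bert_config() -> None:
    config = VisualBertConfig(visual_embedding_dim=1024)
    assert config.visual_embedding_dim == 1024
    assert config.model_type == "visual_bert"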
| 414 | 1 |
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
__magic_name__ = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append("importlib_metadata")
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
| 717 |
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py, or by passing
    # the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )

    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )

    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )

    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)

    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)

        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")

        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))

        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)

        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")


if __name__ == "__main__":
    main()
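
# --- Illustration (not from the original file) ---
# A typical invocation; the model name and hyper-parameters below are
# assumptions for the example, not values taken from this script:
#   python run_xnli.py \
#     --model_name_or_path bert-base-multilingual-cased \
#     --language de --train_language en \
#     --do_train --do_eval \
#     --per_device_train_batch_size 32 \
#     --output_dir /tmp/debug_xnli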
| 391 | 0 |
def bin_to_octal(bin_string: str) -> str:
    """Convert a binary string to its octal representation.

    >>> bin_to_octal("1111")
    '17'
    >>> bin_to_octal("101010101010011")
    '52523'
    """
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
if __name__ == "__main__":
from doctest import testmod
testmod()
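
# --- Illustration (not from the original file) ---
# Cross-check against Python's built-in base conversion:
assert bin_to_octal("110110") == format(int("110110", 2), "o") == "66"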
| 592 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Read-only interface to files in a Hugging Face dataset repository, driven by `DatasetInfo` metadata."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
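
# --- Illustration (not from the original file) ---
# Sketch of listing a dataset repo through the legacy filesystem; the repo id
# "glue" is an assumption for the example and the call needs network access.
def _demo_hf_legacy_fs() -> None:
    from huggingface_hub import HfApi

    repo_info = HfApi().dataset_info("glue")
    fs = HfFileSystem(repo_info=repo_info)
    print(fs.ls("", detail=False))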
| 0 | 0 |
from ..utils import DummyObject, requires_backends
# NOTE: the four original dummy class names could not be recovered from the
# mangled source; the placeholder names below are hypothetical and only keep
# the dummies distinct at module scope.
class _FlaxTransformersDummy1(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class _FlaxTransformersDummy2(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class _FlaxTransformersDummy3(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])


class _FlaxTransformersDummy4(metaclass=DummyObject):
    _backends = ["flax", "transformers"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax", "transformers"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["flax", "transformers"])
| 618 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for simple sequence classification."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data, to be fed into the model."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )
                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, self.label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )

            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )

        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample],
    label_list: List[str],
    max_length: int,
    tokenizer: PreTrainedTokenizer,
):
    """Loads a data file into a list of `InputFeatures`."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
| 618 | 1 |
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """Create a beta schedule that discretizes the given alpha_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
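
# --- Illustration (not from the original file) ---
# Quick sanity check of the cosine schedule: betas stay in (0, max_beta] and
# the cumulative alpha product decreases monotonically.
def _demo_betas_for_alpha_bar() -> None:
    betas = betas_for_alpha_bar(10)
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    assert betas.min() > 0 and betas.max() <= 0.999
    assert bool((alphas_cumprod[1:] < alphas_cumprod[:-1]).all())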
class UpperCAmelCase_ ( A , A ):
'''simple docstring'''
lowercase_ : Dict = [e.name for e in KarrasDiffusionSchedulers]
lowercase_ : List[str] = 2
@register_to_config
def __init__( self : Optional[Any] , snake_case__ : int = 10_00 , snake_case__ : float = 0.00085 , snake_case__ : float = 0.012 , snake_case__ : str = "linear" , snake_case__ : Optional[Union[np.ndarray, List[float]]] = None , snake_case__ : str = "epsilon" , snake_case__ : str = "linspace" , snake_case__ : int = 0 , ):
'''simple docstring'''
if trained_betas is not None:
UpperCAmelCase__ : Dict = torch.tensor(snake_case__ , dtype=torch.floataa )
elif beta_schedule == "linear":
UpperCAmelCase__ : Dict = torch.linspace(snake_case__ , snake_case__ , snake_case__ , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCAmelCase__ : Union[str, Any] = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , snake_case__ , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCAmelCase__ : Optional[int] = betas_for_alpha_bar(snake_case__ )
else:
raise NotImplementedError(F"""{beta_schedule} does is not implemented for {self.__class__}""" )
UpperCAmelCase__ : Optional[Any] = 1.0 - self.betas
UpperCAmelCase__ : Union[str, Any] = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(snake_case__ , snake_case__ , snake_case__ )
def UpperCamelCase ( self : Dict , snake_case__ : Optional[int] , snake_case__ : Tuple=None ):
'''simple docstring'''
if schedule_timesteps is None:
UpperCAmelCase__ : List[str] = self.timesteps
UpperCAmelCase__ : List[Any] = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
UpperCAmelCase__ : str = 1 if len(snake_case__ ) > 1 else 0
else:
UpperCAmelCase__ : List[str] = timestep.cpu().item() if torch.is_tensor(snake_case__ ) else timestep
UpperCAmelCase__ : Tuple = self._index_counter[timestep_int]
return indices[pos].item()
@property
def UpperCamelCase ( self : Tuple ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def UpperCamelCase ( self : Union[str, Any] , snake_case__ : torch.FloatTensor , snake_case__ : Union[float, torch.FloatTensor] , ):
'''simple docstring'''
UpperCAmelCase__ : Dict = self.index_for_timestep(snake_case__ )
if self.state_in_first_order:
UpperCAmelCase__ : Dict = self.sigmas[step_index]
else:
UpperCAmelCase__ : Optional[Any] = self.sigmas_interpol[step_index]
UpperCAmelCase__ : Dict = sample / ((sigma**2 + 1) ** 0.5)
return sample
    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ):
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol

            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples
    def __len__(self):
        return self.config.num_train_timesteps
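
A minimal denoising-loop sketch for the scheduler above. The random tensor stands in for a latent and torch.randn_like for a real UNet noise prediction; both are placeholders, not part of the original file:

import torch

scheduler = KDPM2DiscreteScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=25)

sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
    model_input = scheduler.scale_model_input(sample, t)
    noise_pred = torch.randn_like(model_input)  # stand-in for a UNet forward pass
    sample = scheduler.step(noise_pred, t, sample).prev_sample
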
| 199 |
import json
import sys
import tempfile
import unittest
from pathlib import Path

import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir


sys.path.append(str(Path(__file__).parent.parent.parent.parent / "utils"))

from test_module.custom_configuration import CustomConfig  # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor  # noqa E402


SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir("fixtures")
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_CONFIG = get_tests_dir("fixtures/dummy-config.json")
class AutoFeatureExtractorTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()

            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()

            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoFeatureExtractor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)

        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError,
            "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.",
        ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")

    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )

        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
        )
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")

        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")

    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)

            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)

            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor"
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)

            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True
            )
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
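
For reference, the happy path these tests exercise looks like this in user code (the model id is the same one the first test uses):

from transformers import AutoFeatureExtractor

extractor = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
print(type(extractor).__name__)  # Wav2Vec2FeatureExtractor
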
| 199 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
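
The net effect of the _LazyModule pattern above is that importing the package is cheap: submodules are resolved on first attribute access. A hedged sketch of the resulting user-facing behavior:

# Nothing heavy is imported until the attribute is actually touched.
from transformers import M2M100Config  # fast: only configuration code loads

config = M2M100Config()
print(config.model_type)  # "m2m_100"
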
| 714 |
"""simple docstring"""
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename the keys of the base flax layer."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)

    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer[-1] = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]

    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
_a : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--switch_t5x_checkpoint_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600""",
type=str,
required=False,
help="""Path to a directory containing a folder per layer. Follows the original Google format.""",
)
parser.add_argument("""--max_shard_size""", default="""10GB""", required=False, help="""Max shard size""")
parser.add_argument("""--dtype""", default="""bfloat16""", type=str, required=False, help="""dtype of the saved model""")
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted""",
type=str,
required=False,
help="""Path to the output pytorch model.""",
)
_a : Optional[int] = parser.parse_args()
shard_on_the_fly(
args.switch_tax_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
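
One detail worth calling out: convert_file_size_to_int turns the human-readable --max_shard_size into a byte budget before sharding. A small check, assuming the usual decimal/binary unit convention for this helper:

from transformers.utils.hub import convert_file_size_to_int

assert convert_file_size_to_int("10GB") == 10 * 10**9   # decimal gigabytes
assert convert_file_size_to_int("10GiB") == 10 * 2**30  # binary gibibytes
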
| 87 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}
class Speech2Text2Config(PretrainedConfig):
    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
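
A minimal instantiation sketch for the configuration above, showing the attribute_map aliases in action:

config = Speech2Text2Config(vocab_size=10000, d_model=256, decoder_layers=6)
print(config.hidden_size)          # 256, aliased to d_model
print(config.num_attention_heads)  # 4, aliased to decoder_attention_heads
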
| 203 |
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Applies Tesseract OCR on a document image, and returns recognized words + normalized bounding boxes."""
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize an image to (size["height"], size["width"])."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale an image's pixel values by a scale factor (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize an image with the given mean and standard deviation."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
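
A usage sketch for the processor above; the class name follows the reconstruction used here, and OCR is disabled so the example does not need a Tesseract install:

import numpy as np
from PIL import Image

processor = LayoutLMv3ImageProcessor(apply_ocr=False)
image = Image.fromarray(np.zeros((300, 200, 3), dtype=np.uint8))  # synthetic blank page
encoding = processor.preprocess(image, return_tensors="np")
print(encoding["pixel_values"].shape)  # (1, 3, 224, 224)
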
| 268 | 0 |
from __future__ import annotations

from cmath import sqrt


def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """
    Given the numerical coefficients a, b and c, return the two roots of
    a*x^2 + b*x + c = 0 (real parts only when the roots are real).
    """
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c

    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)

    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main():
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
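
Worked example: for 5x^2 + 6x + 1 the discriminant is 36 - 20 = 16, so the roots are (-6 + 4)/10 and (-6 - 4)/10, i.e. -0.2 and -1.0, exactly what main() prints:

assert quadratic_roots(a=5, b=6, c=1) == (-0.2, -1.0)
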
| 208 |
from jiwer import compute_measures
import datasets
_CITATION = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
_DESCRIPTION = """\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
"""
_KWARGS_DESCRIPTION = """
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> wer = datasets.load_metric(\"wer\")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class WER(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
            ],
        )

    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]

            return incorrect / total
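
To make the formula concrete: with reference "this is the reference" and prediction "this is the prediction", there is one substitution and no deletions or insertions over four reference words, so WER = (1 + 0 + 0) / 4 = 0.25. The same number falls out of jiwer directly:

from jiwer import compute_measures

measures = compute_measures("this is the reference", "this is the prediction")
wer = (measures["substitutions"] + measures["deletions"] + measures["insertions"]) / (
    measures["substitutions"] + measures["deletions"] + measures["hits"]
)
print(wer)  # 0.25
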
| 208 | 1 |
import pprint

import requests

API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
| 534 |
from argparse import ArgumentParser

from .env import EnvironmentCommand
def main():
    parser = ArgumentParser("Diffusers CLI tool", usage="diffusers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="diffusers-cli command helpers")

    # Register commands
    EnvironmentCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
| 534 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig"],
    "tokenization_lxmert": ["LxmertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_lxmert_fast"] = ["LxmertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lxmert"] = [
        "LxmertEncoder",
        "LxmertForPreTraining",
        "LxmertForQuestionAnswering",
        "LxmertModel",
        "LxmertPreTrainedModel",
        "LxmertVisualFeatureEncoder",
        "LxmertXLayer",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_lxmert"] = [
        "TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLxmertForPreTraining",
        "TFLxmertMainLayer",
        "TFLxmertModel",
        "TFLxmertPreTrainedModel",
        "TFLxmertVisualFeatureEncoder",
    ]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 293 |
"""simple docstring"""
def A ( snake_case :int ) -> bool:
return str(snake_case ) == str(snake_case )[::-1]
def A ( snake_case :int ) -> int:
return int(snake_case ) + int(str(snake_case )[::-1] )
def A ( snake_case :int = 1_0_0_0_0 ) -> int:
__UpperCamelCase = []
for num in range(1 , snake_case ):
__UpperCamelCase = 0
__UpperCamelCase = num
while iterations < 5_0:
__UpperCamelCase = sum_reverse(snake_case )
iterations += 1
if is_palindrome(snake_case ):
break
else:
lychrel_nums.append(snake_case )
return len(snake_case )
if __name__ == "__main__":
print(f'''{solution() = }''')
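
A quick trace of the reverse-and-add process: 349 + 943 = 1292, then 1292 + 2921 = 4213, then 4213 + 3124 = 7337, which is a palindrome, so 349 converges in three iterations. 196 is the smallest candidate for which no palindrome has ever been found:

assert sum_reverse(349) == 1292
assert is_palindrome(sum_reverse(sum_reverse(sum_reverse(349))))
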
| 293 | 1 |