code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
'''simple docstring'''
import argparse
import struct
import unittest
class SHAaaa:
    """Pure-Python SHA-256 (FIPS 180-4).

    Usage: ``SHAaaa(b"abc").hash`` -> 64-char lowercase hex digest.

    NOTE(review): the class name is restored from the two call sites in this
    file (``SHAaaa(...)`` in the unit test and in ``main``); the original
    definition had an obfuscated placeholder name.
    """

    def __init__(self, data: bytes) -> None:
        self.data = data
        # Initial hash values: first 32 bits of the fractional parts of the
        # square roots of the first 8 primes.
        self.hashes = [
            0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
            0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19,
        ]
        # Round constants: first 32 bits of the fractional parts of the cube
        # roots of the first 64 primes.
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
            0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
            0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
            0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
            0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
            0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
            0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
            0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
            0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        """Pad *data* to a multiple of 64 bytes: 0x80 marker, zero fill,
        then the original bit-length as a big-endian 64-bit integer."""
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", len(data) * 8)
        return data + padding + big_endian_integer

    def final_hash(self) -> None:
        """Run the compression function over every 64-byte block and set
        ``self.hash`` to the hex digest."""
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]
        for block in self.blocks:
            # Convert the block into sixteen 4-byte big-endian words, then
            # extend the message schedule to 64 words.
            words = list(struct.unpack(">16L", block))
            words += [0] * 48
            a, b, c, d, e, f, g, h = self.hashes
            for index in range(64):
                if index > 15:
                    # Fill the extended part of the message schedule.
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )
                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000
                # Compression round.
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000
                a, b, c, d, e, f, g, h = (
                    (temp1 + temp2) % 0x100000000,
                    a,
                    b,
                    c,
                    (d + temp1) % 0x100000000,
                    e,
                    f,
                    g,
                )
            # Fold the compressed chunk back into the running hash values.
            mutated_hash_values = [a, b, c, d, e, f, g, h]
            self.hashes = [
                (element + mutated_hash_values[index]) % 0x100000000
                for index, element in enumerate(self.hashes)
            ]
        self.hash = "".join(hex(value)[2:].zfill(8) for value in self.hashes)

    def ror(self, value: int, rotations: int) -> int:
        """Rotate a 32-bit *value* right by *rotations* bits."""
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHAaaaHashTest(unittest.TestCase):
    """Cross-check the pure-Python SHA-256 class against hashlib.

    NOTE(review): renamed from the obfuscated placeholder, which shadowed the
    hash class itself; ``hashlib.shaaaa`` did not exist and is restored to
    ``hashlib.sha256``.
    """

    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHAaaa(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    """CLI entry point: hash a string (``-s``) or a file's contents (``-f``)
    with the SHA-256 implementation above and print the hex digest.

    NOTE(review): restored the name ``main`` from the ``__main__`` guard; the
    original definition used an obfuscated name so the guard crashed.
    """
    import doctest

    doctest.testmod()
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    # The hash input must be a bytestring: read the file raw, or encode the string.
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(args.input_string, "utf-8")
    print(SHAaaa(hash_input).hash)


if __name__ == "__main__":
    main()
| 717 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
# Module-level logger (obfuscated name; conventionally this would be `logger`).
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def normalize_box(box: list, width: int, height: int) -> list:
    """Scale an absolute ``(x0, y0, x1, y1)`` pixel box to the 0-1000
    coordinate space expected by LayoutLM-style models.

    NOTE(review): the name and parameter names are restored from the body and
    the call site below; the original had duplicate placeholder parameters,
    which is a SyntaxError.
    """
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Run Tesseract OCR on *image* and return ``(words, normalized_boxes)``
    where each box is in the 0-1000 ``(left, top, right, bottom)`` space.

    NOTE(review): name restored from the call site in the image processor
    below; the original had duplicate placeholder parameters (SyntaxError)
    and clobbered every local.
    """
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = (
        data["text"],
        data["left"],
        data["top"],
        data["width"],
        data["height"],
    )
    # Filter empty words and their corresponding coordinates.
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]
    # Turn coordinates into (left, top, left+width, top+height) format.
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_boxes.append([x, y, x + w, y + h])
    # Finally, normalize the bounding boxes to the 0-1000 space.
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))
    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class UpperCAmelCase_(BaseImageProcessor):
    """LayoutLM-style image processor: optional resize, rescale and normalize,
    plus optional Tesseract OCR producing words and 0-1000 bounding boxes.

    NOTE(review): the base class and all parameter/method names below are
    reconstructed from the method bodies and imports — the original had every
    parameter named identically (a SyntaxError) and every method named ``A__``
    (each shadowing the previous). Verify against the upstream processor.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize *image* to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        # `resize` here is the module-level transform, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by *scale* (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize *image* with the given per-channel mean and std."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Full pipeline: validate inputs, optionally OCR, then
        resize/rescale/normalize and pack into a ``BatchFeature``."""
        # Per-call arguments override the instance defaults.
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        # Tesseract OCR to get words + normalized bounding boxes.
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 8 | 0 |
'''simple docstring'''
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class UpperCAmelCase_(SeqaSeqTrainer):
    """Seq2seq trainer whose ``evaluate``/``predict`` run generation and then
    apply a user-supplied post-processing function before computing metrics.

    NOTE(review): the base class (undefined ``__snake_case``) is restored to
    the imported ``SeqaSeqTrainer``; parameter and method names are
    reconstructed from the bodies — the original had duplicate placeholder
    parameters (SyntaxError) and both methods named ``A__``. Verify against
    the upstream example trainer.
    """

    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        """Evaluate with generation; returns the (prefixed) metrics dict."""
        gen_kwargs = gen_kwargs.copy()
        # Fall back to the training-args generation settings when not given.
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs
        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation; we do it below on the
        # post-processed predictions instead.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default.
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)
            # Prefix all keys with metric_key_prefix + '_'.
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics
        if self.args.should_log:
            # Only the main node logs the results by default.
            self.log(metrics)
        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())
        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self,
        predict_dataset,
        predict_examples,
        ignore_keys=None,
        metric_key_prefix: str = "test",
        **gen_kwargs,
    ):
        """Predict with generation; returns a ``PredictionOutput`` with
        post-processed predictions and prefixed metrics."""
        gen_kwargs = gen_kwargs.copy()
        test_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, as above.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                test_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'.
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(
            predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics
        )
| 718 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    """Builds toy DistilBert configs/inputs and checks each TF head's output
    shapes.

    NOTE(review): class and method names are restored from the call sites in
    the test class below (``TFDistilBertModelTester(self)``,
    ``prepare_config_and_inputs``, ``create_and_check_*``); the original used
    obfuscated, mutually-shadowing names.
    """

    def __init__(self, parent):
        self.parent = parent
        # Small, fast model dimensions for unit testing.
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        """Create a toy config plus random ids, mask and labels."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        # Also exercise the positional-list calling convention.
        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline test suite for the TF DistilBert heads.

    NOTE(review): bases (undefined ``__A``) are restored from the imported
    mixins; class attributes and method names are restored to the mixin
    conventions — the original's methods were all named ``A__`` and would
    never be discovered by unittest.
    """

    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDistilBertModel,
            "fill-mask": TFDistilBertForMaskedLM,
            "question-answering": TFDistilBertForQuestionAnswering,
            "text-classification": TFDistilBertForSequenceClassification,
            "token-classification": TFDistilBertForTokenClassification,
            "zero-shot": TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # Two boolean feature flags disabled for this model (original names were
    # obfuscated; presumably test_head_masking / test_onnx — verify upstream).
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    """Slow integration test checking base-model hidden states against
    reference values from the published checkpoint.

    NOTE(review): renamed from the obfuscated placeholder, which shadowed the
    unit-test class above; the test method is given a ``test_`` name so
    unittest discovers it.
    """

    @slow
    def test_output_embeds_base_model(self):
        model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)
        # Reference slice of the first three tokens' first three hidden dims.
        expected_slice = tf.constant(
            [
                [
                    [0.19261885, -0.13732955, 0.4119799],
                    [0.22150156, -0.07422661, 0.39037204],
                    [0.22756018, -0.0896414, 0.3701467],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 8 | 0 |
'''simple docstring'''
def heaps(arr: list) -> list:
    """Return all permutations of *arr* as tuples, using the iterative form
    of Heap's algorithm.

    NOTE(review): the name is restored from the ``__main__`` guard; the
    original also mangled both element swaps into plain assignments and gave
    the inner helper duplicate parameter names (a SyntaxError).
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list) -> None:
        # c encodes the state of the loop counters of a recursive Heap's
        # algorithm (see Sedgewick's formulation).
        c = [0] * n
        res.append(tuple(arr))
        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    arr = [int(item) for item in user_input.split(',')]
    print(heaps(arr))
| 719 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the RemBERT subpackage, following the standard
# transformers ``__init__`` pattern.
# NOTE(review): the original rebound a single name for every optional export
# list and never installed the lazy module; restored the conventional
# ``_import_structure`` dict and the ``sys.modules[__name__]`` assignment.
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rembert"] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rembert"] = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert import RemBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert_fast import RemBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rembert import (
            REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RemBertForCausalLM,
            RemBertForMaskedLM,
            RemBertForMultipleChoice,
            RemBertForQuestionAnswering,
            RemBertForSequenceClassification,
            RemBertForTokenClassification,
            RemBertLayer,
            RemBertModel,
            RemBertPreTrainedModel,
            load_tf_weights_in_rembert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rembert import (
            TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForMultipleChoice,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertLayer,
            TFRemBertModel,
            TFRemBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy loader so heavy backends import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""Project Euler problem 800: count "hybrid integers" p**q * q**p (p < q prime)
that do not exceed base**degree."""
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes strictly below ``max_number``.

    Uses a sieve of Eratosthenes; returns an empty list for ``max_number < 2``.
    """
    if max_number < 2:  # guard: isqrt below would fail on negative input
        return []
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            # every multiple of a prime, starting at its square, is composite
            for j in range(i**2, max_number, i):
                is_prime[j] = False
    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800_800, degree: int = 800_800) -> int:
    """Count prime pairs p < q with p**q * q**p <= base**degree.

    Works in log2 space: the condition is q*log2(p) + p*log2(q) <= degree*log2(base).
    Since q*log2(p) + p*log2(q) > p for p < q, every admissible prime is below the
    bound itself, so sieving up to int(upper_bound) suffices. A two-pointer scan
    over the sorted primes counts the pairs in O(n).
    """
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        # shrink `right` until the pair (left, right) satisfies the bound;
        # all pairs (left, left+1..right) then satisfy it too
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
| 720 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
SCREAMING_SNAKE_CASE = parse(importlib.metadata.version('torch'))
def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str) -> bool:
    """Compare a library's installed version (or an already-parsed ``Version``)
    against ``requirement_version`` with the comparison named by ``operation``.

    Args:
        library_or_version: a library name (its installed version is looked up
            via ``importlib.metadata``) or a ``packaging`` ``Version`` object.
        operation: a key of ``STR_OPERATION_TO_FUNC`` (e.g. ``">="``).
        requirement_version: version string to compare against.

    Raises:
        ValueError: if ``operation`` is not a key of ``STR_OPERATION_TO_FUNC``.
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}')
    operation_func = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        # a library name was passed in: resolve its installed version first
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation_func(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str) -> bool:
    """Compare the installed torch version (module-level SCREAMING_SNAKE_CASE,
    parsed at import time) against ``version`` using ``operation``."""
    return compare_versions(SCREAMING_SNAKE_CASE, operation, version)
| 8 | 0 |
'''simple docstring'''
from typing import Callable, List, Optional, Union
import PIL
import torch
from transformers import (
CLIPImageProcessor,
CLIPSegForImageSegmentation,
CLIPSegProcessor,
CLIPTextModel,
CLIPTokenizer,
)
from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name
class UpperCAmelCase_ ( __A ):
    """Pipeline that segments an image with a CLIPSeg model from a text prompt
    and then inpaints the segmented region with Stable Diffusion.

    NOTE(review): the base class was obfuscated to `__A`; given the imports
    above it presumably derives from DiffusionPipeline — confirm. Parameter
    names throughout were obfuscated to `UpperCAmelCase`; method bodies
    reference names (`scheduler`, `safety_checker`, `text`, `image`, ...) that
    no longer exist as parameters, so this class cannot run as written.
    """

    def __init__( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : Dict , ) -> List[str]:
        """Validate/patch the scheduler config, warn if the safety checker is
        disabled, and register all sub-modules on the pipeline.

        NOTE(review): the deprecation branches below build a patched config in
        locals that are never written back to the scheduler — originally these
        presumably updated `scheduler._internal_dict`. Confirm before relying on
        the patched `steps_offset` / `skip_prk_steps` values.
        """
        super().__init__()
        if hasattr(scheduler.config , '''steps_offset''' ) and scheduler.config.steps_offset != 1:
            # legacy schedulers shipped with steps_offset != 1; warn and patch
            lowercase : str =(
                f'The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`'
                f' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure '
                '''to update the config accordingly as leaving `steps_offset` might led to incorrect results'''
                ''' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,'''
                ''' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`'''
                ''' file'''
            )
            deprecate('''steps_offset!=1''' , '''1.0.0''' , UpperCamelCase__ , standard_warn=UpperCamelCase__ )
            lowercase : Union[str, Any] =dict(scheduler.config )
            lowercase : Optional[int] =1
            lowercase : Optional[int] =FrozenDict(UpperCamelCase__ )
        if hasattr(scheduler.config , '''skip_prk_steps''' ) and scheduler.config.skip_prk_steps is False:
            # PNDM schedulers must skip PRK steps for inpainting; warn and patch
            lowercase : Any =(
                f'The configuration file of this scheduler: {scheduler} has not set the configuration'
                ''' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make'''
                ''' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to'''
                ''' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face'''
                ''' Hub, it would be very nice if you could open a Pull request for the'''
                ''' `scheduler/scheduler_config.json` file'''
            )
            deprecate('''skip_prk_steps not set''' , '''1.0.0''' , UpperCamelCase__ , standard_warn=UpperCamelCase__ )
            lowercase : str =dict(scheduler.config )
            lowercase : Optional[int] =True
            lowercase : List[str] =FrozenDict(UpperCamelCase__ )
        if safety_checker is None:
            # allowed, but users must opt in consciously — hence the loud warning
            logger.warning(
                f'You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'
                ''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
                ''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
                ''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
                ''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
                ''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
        self.register_modules(
            segmentation_model=UpperCamelCase__ , segmentation_processor=UpperCamelCase__ , vae=UpperCamelCase__ , text_encoder=UpperCamelCase__ , tokenizer=UpperCamelCase__ , unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , safety_checker=UpperCamelCase__ , feature_extractor=UpperCamelCase__ , )

    # NOTE(review): the next three methods were all obfuscated to the same name
    # `A__`, so only the last definition survives on the class. Presumably they
    # were enable_attention_slicing / disable_attention_slicing /
    # enable_sequential_cpu_offload — confirm against the diffusers pipeline API.
    def A__ ( self : Optional[int] , UpperCAmelCase : Optional[Any] = "auto" ) -> List[Any]:
        """Enable sliced attention computation; "auto" halves the head count."""
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            lowercase : Dict =self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(UpperCamelCase__ )

    def A__ ( self : int ) -> Any:
        """Disable attention slicing (re-enables full attention in one step)."""
        self.enable_attention_slicing(UpperCamelCase__ )

    def A__ ( self : Dict ) -> List[Any]:
        """Offload all large sub-models to CPU via accelerate, moving each to
        GPU only while it executes, to reduce peak memory."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )
        lowercase : List[str] =torch.device('''cuda''' )
        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(UpperCamelCase__ , UpperCamelCase__ )

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def A__ ( self : Optional[Any] ) -> Dict:
        """Return the device the pipeline actually executes on, accounting for
        accelerate hooks installed by cpu-offloading."""
        if self.device != torch.device('''meta''' ) or not hasattr(self.unet , '''_hf_hook''' ):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(UpperCamelCase__ , '''_hf_hook''' )
                and hasattr(module._hf_hook , '''execution_device''' )
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device )
        return self.device

    @torch.no_grad()
    def __call__( self : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : int = 512 , UpperCAmelCase : List[Any] = 512 , UpperCAmelCase : List[str] = 50 , UpperCAmelCase : Optional[Any] = 7.5 , UpperCAmelCase : Optional[Any] = None , UpperCAmelCase : int = 1 , UpperCAmelCase : int = 0.0 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Union[str, Any] = None , UpperCAmelCase : List[str] = "pil" , UpperCAmelCase : str = True , UpperCAmelCase : List[Any] = None , UpperCAmelCase : Dict = 1 , **UpperCAmelCase : List[str] , ) -> Dict:
        """Segment `image` with CLIPSeg using `text`, convert the logits to a
        PIL mask, then delegate to StableDiffusionInpaintPipeline to inpaint
        the masked region with `prompt`.

        NOTE(review): `text`, `image` and `prompt` refer to the obfuscated
        positional parameters — confirm order against the original signature.
        """
        lowercase : Tuple =self.segmentation_processor(
            text=[text] , images=[image] , padding='''max_length''' , return_tensors='''pt''' ).to(self.device )
        lowercase : Optional[int] =self.segmentation_model(**UpperCamelCase__ )
        # sigmoid turns segmentation logits into a soft [0, 1] mask
        lowercase : List[Any] =torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy()
        lowercase : Tuple =self.numpy_to_pil(UpperCamelCase__ )[0].resize(image.size )
        # Run inpainting pipeline with the generated mask
        lowercase : Dict =StableDiffusionInpaintPipeline(
            vae=self.vae , text_encoder=self.text_encoder , tokenizer=self.tokenizer , unet=self.unet , scheduler=self.scheduler , safety_checker=self.safety_checker , feature_extractor=self.feature_extractor , )
        return inpainting_pipeline(
            prompt=UpperCamelCase__ , image=UpperCamelCase__ , mask_image=UpperCamelCase__ , height=UpperCamelCase__ , width=UpperCamelCase__ , num_inference_steps=UpperCamelCase__ , guidance_scale=UpperCamelCase__ , negative_prompt=UpperCamelCase__ , num_images_per_prompt=UpperCamelCase__ , eta=UpperCamelCase__ , generator=UpperCamelCase__ , latents=UpperCamelCase__ , output_type=UpperCamelCase__ , return_dict=UpperCamelCase__ , callback=UpperCamelCase__ , callback_steps=UpperCamelCase__ , )
| 721 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
SCREAMING_SNAKE_CASE = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def lowercase_ ( __A : str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
"""simple docstring"""
lowercase : List[Any] =BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
lowercase : List[str] =job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
lowercase : Union[str, Any] =job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 8 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...feature_extraction_utils import FeatureExtractionMixin
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType, logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
"""deepmind/language-perceiver""": """https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json""",
# See all Perceiver models at https://huggingface.co/models?filter=perceiver
}
class UpperCAmelCase_ ( snake_case_ ):
    """Configuration for a Perceiver model.

    Holds architecture hyper-parameters (latent sizes, attention layout) plus
    task-specific settings for masked LM, image classification, optical flow and
    multimodal autoencoding. Fixes two defects in the previous version: the
    duplicated parameter names (a SyntaxError) and the assignment of every value
    to a throwaway local, which left the config object empty.
    """

    model_type = "perceiver"

    def __init__(
        self,
        num_latents=256,
        d_latents=1280,
        d_model=768,
        num_blocks=1,
        num_self_attends_per_block=26,
        num_self_attention_heads=8,
        num_cross_attention_heads=8,
        qk_channels=None,
        v_channels=None,
        cross_attention_shape_for_attention="kv",
        self_attention_widening_factor=1,
        cross_attention_widening_factor=1,
        hidden_act="gelu",
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_query_residual=True,
        vocab_size=262,
        max_position_embeddings=2048,
        image_size=56,
        train_size=[368, 496],  # noqa: B006 — kept for signature compatibility
        num_frames=16,
        audio_samples_per_frame=1920,
        samples_per_patch=16,
        output_shape=[1, 16, 224, 224],  # noqa: B006 — kept for signature compatibility
        **kwargs,
    ):
        super().__init__(**kwargs)
        # persist every hyper-parameter on the instance so the base config
        # class can serialize it
        self.num_latents = num_latents
        self.d_latents = d_latents
        self.d_model = d_model
        self.num_blocks = num_blocks
        self.num_self_attends_per_block = num_self_attends_per_block
        self.num_self_attention_heads = num_self_attention_heads
        self.num_cross_attention_heads = num_cross_attention_heads
        self.qk_channels = qk_channels
        self.v_channels = v_channels
        self.cross_attention_shape_for_attention = cross_attention_shape_for_attention
        self.self_attention_widening_factor = self_attention_widening_factor
        self.cross_attention_widening_factor = cross_attention_widening_factor
        self.hidden_act = hidden_act
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_query_residual = use_query_residual
        # masked language modeling attributes
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        # image classification attributes
        self.image_size = image_size
        # flow attributes
        self.train_size = train_size
        # multimodal autoencoding attributes
        self.num_frames = num_frames
        self.audio_samples_per_frame = audio_samples_per_frame
        self.samples_per_patch = samples_per_patch
        self.output_shape = output_shape
class UpperCAmelCase_ ( snake_case_ ):
    """ONNX export configuration for Perceiver models.

    Fixes three defects: the members were all obfuscated to the same name
    `A__` (so only the last survived on the class), the `inputs` property
    returned an undefined `dynamic_axis`, and `generate_dummy_inputs` used
    undefined locals (`batch_size`, `seq_length`, ...). Member names are
    restored to the OnnxConfig API (`inputs`, `atol_for_validation`,
    `generate_dummy_inputs`).
    """

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis names for the exported model, keyed by input name."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("inputs", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        # Perceiver outputs need a slightly looser absolute tolerance than the default
        return 1e-4

    def generate_dummy_inputs(
        self,
        preprocessor: Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"],
        batch_size: int = -1,
        seq_length: int = -1,
        num_choices: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ) -> Mapping[str, Any]:
        """Build dummy text or image inputs for ONNX export.

        A tokenizer produces a text batch (its ``input_ids`` are renamed to
        ``inputs``); a feature extractor produces an image batch (its
        ``pixel_values`` are renamed). Raises ValueError otherwise.
        """
        if isinstance(preprocessor, PreTrainedTokenizerBase):
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(
                batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
            )
            # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
            token_to_add = preprocessor.num_special_tokens_to_add(is_pair)
            seq_length = compute_effective_axis_dimension(
                seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
            )
            # Generate dummy inputs according to compute batch and sequence
            dummy_input = [" ".join(["a"]) * seq_length] * batch_size
            inputs = dict(preprocessor(dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("input_ids")
            return inputs
        elif isinstance(preprocessor, FeatureExtractionMixin) and preprocessor.model_input_names[0] == "pixel_values":
            # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
            batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
            dummy_input = self._generate_dummy_images(batch_size, num_channels, image_height, image_width)
            inputs = dict(preprocessor(images=dummy_input, return_tensors=framework))
            inputs["inputs"] = inputs.pop("pixel_values")
            return inputs
        else:
            raise ValueError(
                "Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor."
            )
| 700 |
"""Lazy import structure for the M2M100 model family.

Restores the canonical transformers lazy-module pattern: the previous version
overwrote the import-structure dict with a bare list, referenced an undefined
`_import_structure` name, never installed the lazy proxy in `sys.modules`, and
imported from nonexistent `mam_aaa` modules inconsistent with the declared
structure.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# maps submodule name -> public names it exposes; consumed by _LazyModule below
_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

# torch-backed modeling objects are only registered when torch is installed
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    # static type-checkers see real imports; runtime uses the lazy proxy below
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    # replace this module with a proxy that imports submodules on first access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 8 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = '▁'
SCREAMING_SNAKE_CASE = {'vocab_file': 'sentencepiece.bpe.model'}
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'facebook/nllb-200-distilled-600M': (
'https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'
),
}
}
SCREAMING_SNAKE_CASE = {
'facebook/nllb-200-distilled-600M': 1_024,
}
# fmt: off
SCREAMING_SNAKE_CASE = ['ace_Arab', 'ace_Latn', 'acm_Arab', 'acq_Arab', 'aeb_Arab', 'afr_Latn', 'ajp_Arab', 'aka_Latn', 'amh_Ethi', 'apc_Arab', 'arb_Arab', 'ars_Arab', 'ary_Arab', 'arz_Arab', 'asm_Beng', 'ast_Latn', 'awa_Deva', 'ayr_Latn', 'azb_Arab', 'azj_Latn', 'bak_Cyrl', 'bam_Latn', 'ban_Latn', 'bel_Cyrl', 'bem_Latn', 'ben_Beng', 'bho_Deva', 'bjn_Arab', 'bjn_Latn', 'bod_Tibt', 'bos_Latn', 'bug_Latn', 'bul_Cyrl', 'cat_Latn', 'ceb_Latn', 'ces_Latn', 'cjk_Latn', 'ckb_Arab', 'crh_Latn', 'cym_Latn', 'dan_Latn', 'deu_Latn', 'dik_Latn', 'dyu_Latn', 'dzo_Tibt', 'ell_Grek', 'eng_Latn', 'epo_Latn', 'est_Latn', 'eus_Latn', 'ewe_Latn', 'fao_Latn', 'pes_Arab', 'fij_Latn', 'fin_Latn', 'fon_Latn', 'fra_Latn', 'fur_Latn', 'fuv_Latn', 'gla_Latn', 'gle_Latn', 'glg_Latn', 'grn_Latn', 'guj_Gujr', 'hat_Latn', 'hau_Latn', 'heb_Hebr', 'hin_Deva', 'hne_Deva', 'hrv_Latn', 'hun_Latn', 'hye_Armn', 'ibo_Latn', 'ilo_Latn', 'ind_Latn', 'isl_Latn', 'ita_Latn', 'jav_Latn', 'jpn_Jpan', 'kab_Latn', 'kac_Latn', 'kam_Latn', 'kan_Knda', 'kas_Arab', 'kas_Deva', 'kat_Geor', 'knc_Arab', 'knc_Latn', 'kaz_Cyrl', 'kbp_Latn', 'kea_Latn', 'khm_Khmr', 'kik_Latn', 'kin_Latn', 'kir_Cyrl', 'kmb_Latn', 'kon_Latn', 'kor_Hang', 'kmr_Latn', 'lao_Laoo', 'lvs_Latn', 'lij_Latn', 'lim_Latn', 'lin_Latn', 'lit_Latn', 'lmo_Latn', 'ltg_Latn', 'ltz_Latn', 'lua_Latn', 'lug_Latn', 'luo_Latn', 'lus_Latn', 'mag_Deva', 'mai_Deva', 'mal_Mlym', 'mar_Deva', 'min_Latn', 'mkd_Cyrl', 'plt_Latn', 'mlt_Latn', 'mni_Beng', 'khk_Cyrl', 'mos_Latn', 'mri_Latn', 'zsm_Latn', 'mya_Mymr', 'nld_Latn', 'nno_Latn', 'nob_Latn', 'npi_Deva', 'nso_Latn', 'nus_Latn', 'nya_Latn', 'oci_Latn', 'gaz_Latn', 'ory_Orya', 'pag_Latn', 'pan_Guru', 'pap_Latn', 'pol_Latn', 'por_Latn', 'prs_Arab', 'pbt_Arab', 'quy_Latn', 'ron_Latn', 'run_Latn', 'rus_Cyrl', 'sag_Latn', 'san_Deva', 'sat_Beng', 'scn_Latn', 'shn_Mymr', 'sin_Sinh', 'slk_Latn', 'slv_Latn', 'smo_Latn', 'sna_Latn', 'snd_Arab', 'som_Latn', 'sot_Latn', 'spa_Latn', 'als_Latn', 'srd_Latn', 'srp_Cyrl', 
'ssw_Latn', 'sun_Latn', 'swe_Latn', 'swh_Latn', 'szl_Latn', 'tam_Taml', 'tat_Cyrl', 'tel_Telu', 'tgk_Cyrl', 'tgl_Latn', 'tha_Thai', 'tir_Ethi', 'taq_Latn', 'taq_Tfng', 'tpi_Latn', 'tsn_Latn', 'tso_Latn', 'tuk_Latn', 'tum_Latn', 'tur_Latn', 'twi_Latn', 'tzm_Tfng', 'uig_Arab', 'ukr_Cyrl', 'umb_Latn', 'urd_Arab', 'uzn_Latn', 'vec_Latn', 'vie_Latn', 'war_Latn', 'wol_Latn', 'xho_Latn', 'ydd_Hebr', 'yor_Latn', 'yue_Hant', 'zho_Hans', 'zho_Hant', 'zul_Latn']
class UpperCAmelCase_ ( _UpperCAmelCase ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = ['''input_ids''', '''attention_mask''']
UpperCamelCase_ = []
UpperCamelCase_ = []
def __init__( self : str , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any]="<s>" , UpperCAmelCase : Any="</s>" , UpperCAmelCase : Any="</s>" , UpperCAmelCase : List[str]="<s>" , UpperCAmelCase : int="<unk>" , UpperCAmelCase : Optional[Any]="<pad>" , UpperCAmelCase : str="<mask>" , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : int=None , UpperCAmelCase : Tuple = None , UpperCAmelCase : str=None , UpperCAmelCase : Union[str, Any]=False , **UpperCAmelCase : List[Any] , ) -> int:
'''simple docstring'''
lowercase : List[Any] =AddedToken(lowercase__ , lstrip=lowercase__ , rstrip=lowercase__ ) if isinstance(lowercase__ , lowercase__ ) else mask_token
lowercase : str ={} if sp_model_kwargs is None else sp_model_kwargs
lowercase : Optional[Any] =legacy_behaviour
super().__init__(
bos_token=lowercase__ , eos_token=lowercase__ , unk_token=lowercase__ , sep_token=lowercase__ , cls_token=lowercase__ , pad_token=lowercase__ , mask_token=lowercase__ , tokenizer_file=lowercase__ , src_lang=lowercase__ , tgt_lang=lowercase__ , additional_special_tokens=lowercase__ , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=lowercase__ , **lowercase__ , )
lowercase : Optional[Any] =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(lowercase__ ) )
lowercase : Tuple =vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
lowercase : Any ={"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowercase : Dict =1
lowercase : int =len(self.sp_model )
lowercase : Union[str, Any] ={
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowercase__ )
}
lowercase : Optional[Any] ={v: k for k, v in self.lang_code_to_id.items()}
lowercase : List[Any] =len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowercase : Union[str, Any] ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowercase : Tuple =list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
lowercase : Dict =src_lang if src_lang is not None else "eng_Latn"
lowercase : List[Any] =self.lang_code_to_id[self._src_lang]
lowercase : List[Any] =tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self : str ) -> Tuple:
'''simple docstring'''
lowercase : str =self.__dict__.copy()
lowercase : Optional[Any] =None
lowercase : Any =self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[Any] , UpperCAmelCase : Optional[int] ) -> Dict:
'''simple docstring'''
lowercase : Union[str, Any] =d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowercase : str ={}
lowercase : Tuple =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def A__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
return self._src_lang
@src_lang.setter
def A__ ( self : Dict , UpperCAmelCase : List[str] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Optional[Any] =new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def A__ ( self : Dict , UpperCAmelCase : Any , UpperCAmelCase : int = None , UpperCAmelCase : Dict = False ) -> Union[str, Any]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowercase__ , token_ids_a=lowercase__ , already_has_special_tokens=lowercase__ )
lowercase : List[str] =[1] * len(self.prefix_tokens )
lowercase : Any =[1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(lowercase__ )) + suffix_ones
return prefix_ones + ([0] * len(lowercase__ )) + ([0] * len(lowercase__ )) + suffix_ones
def A__ ( self : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Any = None ) -> Union[str, Any]:
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def A__ ( self : Optional[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] = None ) -> List[str]:
'''simple docstring'''
lowercase : Dict =[self.sep_token_id]
lowercase : str =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : int , UpperCAmelCase : Any , **UpperCAmelCase : Union[str, Any] ) -> Tuple:
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
lowercase : List[Any] =src_lang
lowercase : Tuple =self(lowercase__ , add_special_tokens=lowercase__ , return_tensors=lowercase__ , **lowercase__ )
lowercase : List[Any] =self.convert_tokens_to_ids(lowercase__ )
lowercase : str =tgt_lang_id
return inputs
def A__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] ={self.convert_ids_to_tokens(lowercase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A__ ( self : str , UpperCAmelCase : Any ) -> Tuple:
'''simple docstring'''
return self.sp_model.encode(lowercase__ , out_type=lowercase__ )
def A__ ( self : Optional[Any] , UpperCAmelCase : List[Any] ) -> str:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowercase : List[Any] =self.sp_model.PieceToId(lowercase__ )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def A__ ( self : Any , UpperCAmelCase : Union[str, Any] ) -> str:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def A__ ( self : List[Any] , UpperCAmelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] ="".join(lowercase__ ).replace(lowercase__ , ''' ''' ).strip()
return out_string
def A__ ( self : Optional[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[int] = None ) -> Dict:
'''simple docstring'''
if not os.path.isdir(lowercase__ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase : Tuple =os.path.join(
lowercase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , lowercase__ )
elif not os.path.isfile(self.vocab_file ):
with open(lowercase__ , '''wb''' ) as fi:
lowercase : List[str] =self.sp_model.serialized_model_proto()
fi.write(lowercase__ )
return (out_vocab_file,)
def A__ ( self : Any , UpperCAmelCase : Dict , UpperCAmelCase : Any = "eng_Latn" , UpperCAmelCase : Dict = None , UpperCAmelCase : Dict = "fra_Latn" , **UpperCAmelCase : Optional[int] , ) -> Optional[int]:
'''simple docstring'''
lowercase : str =src_lang
lowercase : List[Any] =tgt_lang
return super().prepare_seqaseq_batch(lowercase__ , lowercase__ , **lowercase__ )
def A__ ( self : Optional[int] ) -> Tuple:
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def A__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def A__ ( self : Any , UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
lowercase : Any =self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
lowercase : Dict =[]
lowercase : Optional[Any] =[self.eos_token_id, self.cur_lang_code]
else:
lowercase : Union[str, Any] =[self.cur_lang_code]
lowercase : Dict =[self.eos_token_id]
def A__ ( self : Tuple , UpperCAmelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : Any =self.lang_code_to_id[lang]
if self.legacy_behaviour:
lowercase : List[str] =[]
lowercase : Union[str, Any] =[self.eos_token_id, self.cur_lang_code]
else:
lowercase : Tuple =[self.cur_lang_code]
lowercase : List[str] =[self.eos_token_id]
| 701 |
"""Lazy import structure for the MEGA model family.

Restores the canonical transformers lazy-module pattern: the previous version
overwrote the import-structure dict with a bare list, referenced an undefined
`_import_structure` name, and never installed the lazy proxy in `sys.modules`.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


# maps submodule name -> public names it exposes; consumed by _LazyModule below
_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

# torch-backed modeling objects are only registered when torch is installed
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    # static type-checkers see real imports; runtime uses the lazy proxy below
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )

else:
    import sys

    # replace this module with a proxy that imports submodules on first access
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 8 | 0 |
'''simple docstring'''
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
def __init__( self : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int=13 , UpperCAmelCase : str=7 , UpperCAmelCase : Any=True , UpperCAmelCase : List[str]=True , UpperCAmelCase : Any=False , UpperCAmelCase : List[Any]=True , UpperCAmelCase : List[str]=99 , UpperCAmelCase : Optional[Any]=32 , UpperCAmelCase : List[str]=5 , UpperCAmelCase : List[Any]=4 , UpperCAmelCase : List[Any]=64 , UpperCAmelCase : List[Any]="gelu" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : int=512 , UpperCAmelCase : Tuple=16 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : int=0.0_2 , UpperCAmelCase : Union[str, Any]=3 , UpperCAmelCase : Any=4 , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Union[str, Any]=2 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : int=2 , UpperCAmelCase : Dict=2 , UpperCAmelCase : List[str]=4 , UpperCAmelCase : str=1 , ) -> List[str]:
'''simple docstring'''
lowercase : Union[str, Any] =parent
lowercase : int =batch_size
lowercase : str =seq_length
lowercase : Tuple =is_training
lowercase : List[str] =use_input_mask
lowercase : Optional[int] =use_token_type_ids
lowercase : Any =use_labels
lowercase : List[Any] =vocab_size
lowercase : str =hidden_size
lowercase : str =num_hidden_layers
lowercase : List[str] =num_attention_heads
lowercase : int =intermediate_size
lowercase : Optional[int] =hidden_act
lowercase : Optional[Any] =hidden_dropout_prob
lowercase : Optional[Any] =attention_probs_dropout_prob
lowercase : Union[str, Any] =max_position_embeddings
lowercase : Optional[int] =type_vocab_size
lowercase : Dict =type_sequence_label_size
lowercase : Optional[int] =initializer_range
lowercase : List[Any] =num_labels
lowercase : Any =num_choices
lowercase : str =scope
lowercase : Any =q_groups
lowercase : Any =k_groups
lowercase : Union[str, Any] =v_groups
lowercase : int =post_attention_groups
lowercase : str =intermediate_groups
lowercase : Union[str, Any] =output_groups
def A__ ( self : Any ) -> Tuple:
'''simple docstring'''
lowercase : Dict =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Tuple =None
if self.use_input_mask:
lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : List[Any] =None
lowercase : List[str] =None
lowercase : Tuple =None
if self.use_labels:
lowercase : str =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : int =ids_tensor([self.batch_size] , self.num_choices )
lowercase : str =self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : str ) -> Tuple:
'''simple docstring'''
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def A__ ( self : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : List[str] ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[Any] =SqueezeBertModel(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase : List[str] =model(UpperCAmelCase , UpperCAmelCase )
lowercase : Any =model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : str , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Any ) -> Optional[int]:
'''simple docstring'''
lowercase : List[str] =SqueezeBertForMaskedLM(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase : Any =model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : int ) -> Any:
'''simple docstring'''
lowercase : Any =SqueezeBertForQuestionAnswering(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase : List[str] =model(
UpperCAmelCase , attention_mask=UpperCAmelCase , start_positions=UpperCAmelCase , end_positions=UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self : Tuple , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : List[str] =self.num_labels
lowercase : Dict =SqueezeBertForSequenceClassification(UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase : Union[str, Any] =model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self : Any , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : Any , UpperCAmelCase : Tuple , UpperCAmelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
lowercase : str =self.num_labels
lowercase : Dict =SqueezeBertForTokenClassification(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase : Optional[Any] =model(UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : str , UpperCAmelCase : Optional[int] , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict ) -> Any:
'''simple docstring'''
lowercase : Optional[Any] =self.num_choices
lowercase : Union[str, Any] =SqueezeBertForMultipleChoice(config=UpperCAmelCase )
model.to(UpperCAmelCase )
model.eval()
lowercase : List[str] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : List[Any] =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : List[str] =model(
UpperCAmelCase , attention_mask=UpperCAmelCase , labels=UpperCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Dict =self.prepare_config_and_inputs()
(lowercase) : List[Any] =config_and_inputs
lowercase : Dict ={'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
UpperCamelCase_ = (
{
'''feature-extraction''': SqueezeBertModel,
'''fill-mask''': SqueezeBertForMaskedLM,
'''question-answering''': SqueezeBertForQuestionAnswering,
'''text-classification''': SqueezeBertForSequenceClassification,
'''token-classification''': SqueezeBertForTokenClassification,
'''zero-shot''': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = True
UpperCamelCase_ = False
def A__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Union[str, Any] =SqueezeBertModelTester(self )
lowercase : Dict =ConfigTester(self , config_class=UpperCAmelCase , dim=37 )
def A__ ( self : Any ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*UpperCAmelCase )
def A__ ( self : Any ) -> Any:
'''simple docstring'''
lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*UpperCAmelCase )
def A__ ( self : Optional[int] ) -> List[str]:
'''simple docstring'''
lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*UpperCAmelCase )
def A__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*UpperCAmelCase )
def A__ ( self : Tuple ) -> List[Any]:
'''simple docstring'''
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*UpperCAmelCase )
def A__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*UpperCAmelCase )
@slow
def A__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Optional[int] =SqueezeBertModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
lowercase : int =SqueezeBertForSequenceClassification.from_pretrained('''squeezebert/squeezebert-mnli''' )
lowercase : str =torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]] )
lowercase : Any =model(UpperCAmelCase )[0]
lowercase : Tuple =torch.Size((1, 3) )
self.assertEqual(output.shape , UpperCAmelCase )
lowercase : int =torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]] )
self.assertTrue(torch.allclose(UpperCAmelCase , UpperCAmelCase , atol=1e-4 ) )
| 702 |
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
},
'merges_file': {
'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
},
}
SCREAMING_SNAKE_CASE = {'allegro/herbert-base-cased': 514}
SCREAMING_SNAKE_CASE = {}
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = HerbertTokenizer
def __init__( self : Dict , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : Any=None , UpperCAmelCase : Tuple="<s>" , UpperCAmelCase : List[Any]="<unk>" , UpperCAmelCase : str="<pad>" , UpperCAmelCase : Optional[Any]="<mask>" , UpperCAmelCase : List[str]="</s>" , **UpperCAmelCase : List[str] , ) -> Dict:
'''simple docstring'''
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , sep_token=UpperCAmelCase , **UpperCAmelCase , )
def A__ ( self : int , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase : List[Any] =[self.cls_token_id]
lowercase : Any =[self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def A__ ( self : Any , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase )) + [1]
return [1] + ([0] * len(UpperCAmelCase )) + [1] + ([0] * len(UpperCAmelCase )) + [1]
def A__ ( self : Optional[int] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase : Optional[Any] =[self.sep_token_id]
lowercase : Dict =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
lowercase : List[Any] =self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
| 8 | 0 |
'''simple docstring'''
import math
def lowercase_ ( __A : list , __A : int ) -> int:
"""simple docstring"""
lowercase : Dict =len(__lowercase )
lowercase : List[str] =int(math.floor(math.sqrt(__lowercase ) ) )
lowercase : int =0
while arr[min(__lowercase , __lowercase ) - 1] < x:
lowercase : Optional[int] =step
step += int(math.floor(math.sqrt(__lowercase ) ) )
if prev >= n:
return -1
while arr[prev] < x:
lowercase : Tuple =prev + 1
if prev == min(__lowercase , __lowercase ):
return -1
if arr[prev] == x:
return prev
return -1
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = input('Enter numbers separated by a comma:\n').strip()
SCREAMING_SNAKE_CASE = [int(item) for item in user_input.split(',')]
SCREAMING_SNAKE_CASE = int(input('Enter the number to be searched:\n'))
SCREAMING_SNAKE_CASE = jump_search(arr, x)
if res == -1:
print('Number not found!')
else:
print(f"""Number {x} is at index {res}""")
| 703 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
@require_torch
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
lowercase : Any ='''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
lowercase : Optional[int] ='''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
lowercase : Any ='''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
lowercase : Tuple ='''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(UpperCAmelCase )
BertModel.from_pretrained(UpperCAmelCase )
BertTokenizer.from_pretrained(UpperCAmelCase )
pipeline(task='''fill-mask''' , model=UpperCAmelCase )
# baseline - just load from_pretrained with normal network
lowercase : List[str] =[sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
lowercase : Tuple =self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase : Optional[Any] ='''1'''
lowercase : Any =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def A__ ( self : str ) -> List[str]:
'''simple docstring'''
lowercase : str ='''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
lowercase : Optional[Any] ='''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
lowercase : Optional[int] ='''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
# Force fetching the files so that we can use the cache
lowercase : Optional[Any] ='''hf-internal-testing/tiny-random-bert'''
BertConfig.from_pretrained(UpperCAmelCase )
BertModel.from_pretrained(UpperCAmelCase )
BertTokenizer.from_pretrained(UpperCAmelCase )
pipeline(task='''fill-mask''' , model=UpperCAmelCase )
# baseline - just load from_pretrained with normal network
lowercase : Optional[Any] =[sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
# should succeed
lowercase : str =self.get_env()
lowercase : Any =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def A__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] ='''
from transformers import BertConfig, BertModel, BertTokenizer
'''
lowercase : List[Any] ='''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
lowercase : int ='''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
# baseline - just load from_pretrained with normal network
lowercase : Tuple =[sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
lowercase : Optional[Any] =self.get_env()
lowercase : List[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# next emulate no network
lowercase : Tuple =[sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase : Any ='''1'''
lowercase : Optional[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
@require_torch
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[int] ='''
from transformers import pipeline
'''
lowercase : List[Any] ='''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
lowercase : Tuple ='''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
lowercase : Tuple =self.get_env()
lowercase : Optional[int] ='''1'''
lowercase : Union[str, Any] =[sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
lowercase : Dict =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )
@require_torch
def A__ ( self : int ) -> Optional[int]:
'''simple docstring'''
lowercase : List[str] ='''
from transformers import AutoModel
'''
lowercase : Dict ='''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
# baseline - just load from_pretrained with normal network
lowercase : Dict =[sys.executable, '''-c''', '''\n'''.join([load, run] )]
# should succeed
lowercase : Optional[Any] =self.get_env()
lowercase : int =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
lowercase : List[str] ='''1'''
lowercase : List[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('''success''' , result.stdout.decode() )
| 8 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
class UpperCAmelCase_ ( __lowercase ):
"""simple docstring"""
def __init__( self : List[Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Any ) -> Optional[int]:
'''simple docstring'''
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use GLPNImageProcessor instead.''' , _A , )
super().__init__(*_A , **_A )
| 704 |
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'Alexander Joslin'
import operator as op
from .stack import Stack
def lowercase_ ( __A : str ) -> int:
"""simple docstring"""
lowercase : int ={'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
lowercase : Stack[int] =Stack()
lowercase : Stack[str] =Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(__A ) )
elif i in operators:
# RULE 2
operator_stack.push(__A )
elif i == ")":
# RULE 4
lowercase : Optional[Any] =operator_stack.peek()
operator_stack.pop()
lowercase : Optional[Any] =operand_stack.peek()
operand_stack.pop()
lowercase : Optional[Any] =operand_stack.peek()
operand_stack.pop()
lowercase : List[str] =operators[opr](__A , __A )
operand_stack.push(__A )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 8 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class UpperCAmelCase_ ( __a ):
"""simple docstring"""
UpperCamelCase_ = '''efficientnet'''
def __init__( self : Tuple , UpperCAmelCase : int = 3 , UpperCAmelCase : int = 600 , UpperCAmelCase : float = 2.0 , UpperCAmelCase : float = 3.1 , UpperCAmelCase : int = 8 , UpperCAmelCase : List[int] = [3, 3, 5, 3, 5, 5, 3] , UpperCAmelCase : List[int] = [32, 16, 24, 40, 80, 112, 192] , UpperCAmelCase : List[int] = [16, 24, 40, 80, 112, 192, 320] , UpperCAmelCase : List[int] = [] , UpperCAmelCase : List[int] = [1, 2, 2, 2, 1, 2, 1] , UpperCAmelCase : List[int] = [1, 2, 2, 3, 3, 4, 1] , UpperCAmelCase : List[int] = [1, 6, 6, 6, 6, 6, 6] , UpperCAmelCase : float = 0.2_5 , UpperCAmelCase : str = "swish" , UpperCAmelCase : int = 2560 , UpperCAmelCase : str = "mean" , UpperCAmelCase : float = 0.0_2 , UpperCAmelCase : float = 0.0_0_1 , UpperCAmelCase : float = 0.9_9 , UpperCAmelCase : float = 0.5 , UpperCAmelCase : float = 0.2 , **UpperCAmelCase : Optional[Any] , ) -> int:
'''simple docstring'''
super().__init__(**snake_case__ )
lowercase : Union[str, Any] =num_channels
lowercase : List[Any] =image_size
lowercase : int =width_coefficient
lowercase : Tuple =depth_coefficient
lowercase : Any =depth_divisor
lowercase : Optional[Any] =kernel_sizes
lowercase : int =in_channels
lowercase : Tuple =out_channels
lowercase : Tuple =depthwise_padding
lowercase : Any =strides
lowercase : Union[str, Any] =num_block_repeats
lowercase : int =expand_ratios
lowercase : Optional[int] =squeeze_expansion_ratio
lowercase : str =hidden_act
lowercase : Optional[Any] =hidden_dim
lowercase : Dict =pooling_type
lowercase : List[Any] =initializer_range
lowercase : Union[str, Any] =batch_norm_eps
lowercase : Any =batch_norm_momentum
lowercase : List[str] =dropout_rate
lowercase : Union[str, Any] =drop_connect_rate
lowercase : int =sum(snake_case__ ) * 4
class UpperCAmelCase_ ( __a ):
"""simple docstring"""
UpperCamelCase_ = version.parse('''1.11''' )
@property
def A__ ( self : Tuple ) -> Any:
'''simple docstring'''
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def A__ ( self : Optional[int] ) -> int:
'''simple docstring'''
return 1e-5
| 705 |
'''simple docstring'''
import re
def lowercase_ ( __A : str ) -> bool:
"""simple docstring"""
lowercase : Any =re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
if match := re.search(__A , __A ):
return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
| 8 | 0 |
'''simple docstring'''
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
SCREAMING_SNAKE_CASE = '\\n\n'
SCREAMING_SNAKE_CASE = '\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n'
SCREAMING_SNAKE_CASE = '\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 78.22\n >>> print(round(results["perplexities"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric("perplexity")\n >>> input_texts = datasets.load_dataset("wikitext",\n ... "wikitext-2-raw-v1",\n ... split="test")["text"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... 
input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results["mean_perplexity"], 2))\n 60.35\n >>> print(round(results["perplexities"][0], 2))\n 81.12\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def A__ ( self : int ) -> int:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''input_texts''': datasets.Value('''string''' ),
} ) , reference_urls=['''https://huggingface.co/docs/transformers/perplexity'''] , )
def A__ ( self : Dict , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] = 16 , UpperCAmelCase : Any = True , UpperCAmelCase : Dict=None ) -> Tuple:
'''simple docstring'''
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
lowercase : Optional[Any] ="cuda"
else:
lowercase : Tuple ="cuda" if torch.cuda.is_available() else "cpu"
lowercase : Tuple =AutoModelForCausalLM.from_pretrained(a_ )
lowercase : List[str] =model.to(a_ )
lowercase : Any =AutoTokenizer.from_pretrained(a_ )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
lowercase : str =list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(a_ ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'''pad_token''': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
lowercase : List[str] =model.config.max_length - 1
else:
lowercase : int =model.config.max_length
lowercase : Any =tokenizer(
a_ , add_special_tokens=a_ , padding=a_ , truncation=a_ , max_length=a_ , return_tensors='''pt''' , return_attention_mask=a_ , ).to(a_ )
lowercase : Dict =encodings["input_ids"]
lowercase : Union[str, Any] =encodings["attention_mask"]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
lowercase : str =[]
lowercase : Union[str, Any] =CrossEntropyLoss(reduction='''none''' )
for start_index in logging.tqdm(range(0 , len(a_ ) , a_ ) ):
lowercase : Optional[Any] =min(start_index + batch_size , len(a_ ) )
lowercase : List[str] =encoded_texts[start_index:end_index]
lowercase : Optional[Any] =attn_masks[start_index:end_index]
if add_start_token:
lowercase : Any =torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(a_ )
lowercase : List[str] =torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
lowercase : Tuple =torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(a_ ), attn_mask] , dim=1 )
lowercase : Tuple =encoded_batch
with torch.no_grad():
lowercase : Dict =model(a_ , attention_mask=a_ ).logits
lowercase : List[str] =out_logits[..., :-1, :].contiguous()
lowercase : Any =labels[..., 1:].contiguous()
lowercase : int =attn_mask[..., 1:].contiguous()
lowercase : Optional[int] =torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , a_ ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(a_ )}
| 706 |
"""Tests for the TensorFlow RoFormer model family (model, LM heads, and classification heads)."""
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
# Shape-checking helper for the TF RoFormer test suite: builds a tiny
# RoFormerConfig plus random input tensors and asserts output shapes for
# every task head.
# NOTE(review): this block carries mechanical-rename artifacts -- method
# signatures repeat the parameter name `UpperCAmelCase` (a duplicate-argument
# SyntaxError) and every local is bound to a variable literally named
# `lowercase`, so reads such as `self.batch_size` or `config` are unbound.
# Confirm against the upstream TFRoFormerModelTester before relying on it.
class UpperCAmelCase_ :
    """simple docstring"""
    # All constructor arguments are immediately shadowed by the hard-coded
    # small-model hyperparameters below (batch 13, seq 7, hidden 32, 2 layers).
    def __init__( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any]=13 , UpperCAmelCase : int=7 , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=99 , UpperCAmelCase : List[Any]=32 , UpperCAmelCase : str=2 , UpperCAmelCase : str=4 , UpperCAmelCase : List[Any]=37 , UpperCAmelCase : str="gelu" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Dict=512 , UpperCAmelCase : List[Any]=16 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : Any=0.0_2 , UpperCAmelCase : List[str]=3 , UpperCAmelCase : Dict=4 , UpperCAmelCase : Optional[int]=None , ) -> Optional[Any]:
        '''simple docstring'''
        lowercase : Optional[Any] =parent
        lowercase : Tuple =13
        lowercase : Any =7
        lowercase : Union[str, Any] =True
        lowercase : Any =True
        lowercase : Optional[int] =True
        lowercase : List[str] =True
        lowercase : Tuple =99
        lowercase : str =32
        lowercase : Union[str, Any] =2
        lowercase : Dict =4
        lowercase : Union[str, Any] =37
        lowercase : Union[str, Any] ='''gelu'''
        lowercase : Any =0.1
        lowercase : Dict =0.1
        lowercase : Dict =512
        lowercase : List[str] =16
        lowercase : Dict =2
        lowercase : int =0.0_2
        lowercase : List[Any] =3
        lowercase : List[str] =4
        lowercase : Optional[Any] =None
    # Builds the config plus random ids/masks/labels consumed by the checks.
    def A__ ( self : Union[str, Any] ) -> int:
        '''simple docstring'''
        lowercase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase : str =None
        if self.use_input_mask:
            lowercase : int =random_attention_mask([self.batch_size, self.seq_length] )
        lowercase : Any =None
        if self.use_token_type_ids:
            lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowercase : List[Any] =None
        lowercase : List[str] =None
        lowercase : List[str] =None
        if self.use_labels:
            lowercase : Optional[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowercase : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowercase : Any =ids_tensor([self.batch_size] , self.num_choices )
        lowercase : List[Any] =RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    # Base model: runs on dict and list inputs, checks last_hidden_state shape.
    def A__ ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ) -> Optional[Any]:
        '''simple docstring'''
        lowercase : List[Any] =TFRoFormerModel(config=UpperCAmelCase )
        lowercase : Optional[int] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowercase : Tuple =[input_ids, input_mask]
        lowercase : str =model(UpperCAmelCase )
        lowercase : Dict =model(UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    # Causal-LM head: logits must be (batch, seq_len, vocab_size).
    def A__ ( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ) -> Any:
        '''simple docstring'''
        lowercase : Dict =True
        lowercase : List[Any] =TFRoFormerForCausalLM(config=UpperCAmelCase )
        lowercase : Union[str, Any] ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        lowercase : Optional[Any] =model(UpperCAmelCase )['''logits''']
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
    # Masked-LM head: logits must be (batch, seq_len, vocab_size).
    def A__ ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple ) -> Dict:
        '''simple docstring'''
        lowercase : List[Any] =TFRoFormerForMaskedLM(config=UpperCAmelCase )
        lowercase : List[str] ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        lowercase : Dict =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    # Sequence-classification head: logits must be (batch, num_labels).
    def A__ ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : int ) -> Optional[Any]:
        '''simple docstring'''
        lowercase : Optional[Any] =self.num_labels
        lowercase : Optional[int] =TFRoFormerForSequenceClassification(config=UpperCAmelCase )
        lowercase : Optional[int] ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        lowercase : Optional[Any] =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    # Multiple-choice head: inputs tiled per choice; logits (batch, num_choices).
    def A__ ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple ) -> Union[str, Any]:
        '''simple docstring'''
        lowercase : int =self.num_choices
        lowercase : Tuple =TFRoFormerForMultipleChoice(config=UpperCAmelCase )
        lowercase : Union[str, Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        lowercase : Tuple =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        lowercase : List[Any] ={
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        lowercase : Dict =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    # Token-classification head: logits must be (batch, seq_len, num_labels).
    def A__ ( self : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> Optional[int]:
        '''simple docstring'''
        lowercase : List[Any] =self.num_labels
        lowercase : Union[str, Any] =TFRoFormerForTokenClassification(config=UpperCAmelCase )
        lowercase : Tuple ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        lowercase : List[str] =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    # QA head: start/end logits must each be (batch, seq_len).
    def A__ ( self : int , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ) -> Any:
        '''simple docstring'''
        lowercase : Tuple =TFRoFormerForQuestionAnswering(config=UpperCAmelCase )
        lowercase : List[str] ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        lowercase : List[str] =model(UpperCAmelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    # Repackages prepare_config_and_inputs() into the common (config, dict) form.
    # NOTE(review): annotated assignment to a parenthesized tuple target is
    # itself invalid syntax -- another transcription artifact.
    def A__ ( self : List[Any] ) -> Dict:
        '''simple docstring'''
        lowercase : Optional[Any] =self.prepare_config_and_inputs()
        (
            (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) ,
        ) : Optional[int] =config_and_inputs
        lowercase : str ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
# Standard model-test harness for TF RoFormer: wires the tester above into the
# common mixin-driven checks plus a slow from_pretrained smoke test.
# NOTE(review): the base classes are written as `__A , __A` (presumably
# TFModelTesterMixin and PipelineTesterMixin) and `TFRoFormerModelTester` /
# `pipeline_test_casse_name` are undefined here -- mechanical-rename
# artifacts; confirm against the upstream test file.
@require_tf
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
    """simple docstring"""
    # all_model_classes equivalent: every TF RoFormer head under test.
    UpperCamelCase_ = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # pipeline_model_mapping equivalent: task name -> model class.
    UpperCamelCase_ = (
        {
            '''feature-extraction''': TFRoFormerModel,
            '''fill-mask''': TFRoFormerForMaskedLM,
            '''question-answering''': TFRoFormerForQuestionAnswering,
            '''text-classification''': TFRoFormerForSequenceClassification,
            '''text-generation''': TFRoFormerForCausalLM,
            '''token-classification''': TFRoFormerForTokenClassification,
            '''zero-shot''': TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    UpperCamelCase_ = False
    UpperCamelCase_ = False
    # Skips the text-generation pipeline tests for this architecture.
    def A__ ( self : Dict , UpperCAmelCase : str , UpperCAmelCase : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Optional[int] , UpperCAmelCase : str ) -> Tuple:
        '''simple docstring'''
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False
    def A__ ( self : str ) -> Optional[int]:
        '''simple docstring'''
        lowercase : List[Any] =TFRoFormerModelTester(self )
        lowercase : Union[str, Any] =ConfigTester(self , config_class=UpperCAmelCase , hidden_size=37 )
    def A__ ( self : Optional[Any] ) -> Union[str, Any]:
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def A__ ( self : List[str] ) -> List[str]:
        '''simple docstring'''
        lowercase : str =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCAmelCase )
    def A__ ( self : List[Any] ) -> Optional[Any]:
        '''simple docstring'''
        lowercase : Tuple =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCAmelCase )
    def A__ ( self : str ) -> Optional[int]:
        '''simple docstring'''
        lowercase : Any =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*UpperCAmelCase )
    def A__ ( self : int ) -> Tuple:
        '''simple docstring'''
        lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*UpperCAmelCase )
    def A__ ( self : Dict ) -> List[str]:
        '''simple docstring'''
        lowercase : Dict =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCAmelCase )
    def A__ ( self : Dict ) -> Any:
        '''simple docstring'''
        lowercase : int =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCAmelCase )
    def A__ ( self : List[Any] ) -> Tuple:
        '''simple docstring'''
        lowercase : str =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCAmelCase )
    # Slow: downloads the public checkpoint and checks it instantiates.
    @slow
    def A__ ( self : str ) -> str:
        '''simple docstring'''
        lowercase : Union[str, Any] =TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''' )
        self.assertIsNotNone(UpperCAmelCase )
# Slow integration test: run the public masked-LM checkpoint on a fixed token
# sequence and compare the first 3x3 corner of the logits to golden values.
# NOTE(review): locals are bound to a variable named `lowercase`, so
# `model`, `vocab_size`, `output` etc. are unbound -- rename artifacts.
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
    """simple docstring"""
    @slow
    def A__ ( self : Union[str, Any] ) -> Dict:
        '''simple docstring'''
        lowercase : Any =TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''' )
        lowercase : Optional[Any] =tf.constant([[0, 1, 2, 3, 4, 5]] )
        lowercase : List[str] =model(UpperCAmelCase )[0]
        # TODO Replace vocab size
        lowercase : Tuple =5_0000
        lowercase : List[str] =[1, 6, vocab_size]
        self.assertEqual(output.shape , UpperCAmelCase )
        print(output[:, :3, :3] )
        # TODO Replace values below with what was printed above.
        lowercase : Dict =tf.constant(
            [
                [
                    [-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6],
                    [-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7],
                    [-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
# Unit tests for TFRoFormerSinusoidalPositionalEmbedding: compares produced
# embeddings/weights against hard-coded sin/cos reference values at atol 1e-4.
# NOTE(review): locals bound to `lowercase` leave `emba` / `input_ids`
# unbound in the first test -- rename artifacts.
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
    """simple docstring"""
    # Absolute tolerance shared by both comparisons (originally `tolerance`).
    UpperCamelCase_ = 1e-4
    def A__ ( self : int ) -> List[Any]:
        '''simple docstring'''
        lowercase : Union[str, Any] =tf.constant([[4, 10]] )
        lowercase : List[Any] =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        lowercase : Any =emba(input_ids.shape )
        lowercase : List[str] =tf.constant(
            [[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0], [0.8_4_1_5, 0.0_4_6_4, 0.0_0_2_2, 0.5_4_0_3, 0.9_9_8_9, 1.0_0_0_0]] )
        tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
    def A__ ( self : Optional[Any] ) -> int:
        '''simple docstring'''
        lowercase : Optional[Any] =tf.constant(
            [
                [0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0],
                [0.8_4_1_5, 0.8_2_1_9, 0.8_0_2_0, 0.7_8_1_9, 0.7_6_1_7],
                [0.9_0_9_3, 0.9_3_6_4, 0.9_5_8_1, 0.9_7_4_9, 0.9_8_7_0],
            ] )
        lowercase : Tuple =TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 , embedding_dim=512 )
        emba([2, 16, 512] )
        lowercase : str =emba.weight[:3, :5]
        tf.debugging.assert_near(UpperCAmelCase , UpperCAmelCase , atol=self.tolerance )
# Unit test for rotary position embeddings: applies
# TFRoFormerSelfAttention.apply_rotary_position_embeddings to synthetic
# query/key tensors and checks a 6x8 corner against golden values.
# NOTE(review): `embed_positions`, `query_layer`, `key_layer` are unbound
# because locals are assigned to a variable named `lowercase` -- artifacts.
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
    """simple docstring"""
    # Absolute tolerance for the golden-value comparisons.
    UpperCamelCase_ = 1e-4
    def A__ ( self : Dict ) -> Dict:
        '''simple docstring'''
        lowercase : str =tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
        lowercase : Any =-tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.floataa ) , shape=(2, 12, 16, 64) ) / 100
        lowercase : Any =TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
        lowercase : Optional[Any] =embed_positions([2, 16, 768] )[None, None, :, :]
        lowercase , lowercase : Optional[int] =TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
        lowercase : Any =tf.constant(
            [
                [0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0],
                [-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3],
                [-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5],
                [-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1],
                [0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0],
                [3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3],
            ] )
        lowercase : int =tf.constant(
            [
                [0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0],
                [0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3],
                [1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5],
                [2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1],
                [-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0],
                [-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3],
            ] )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , UpperCAmelCase , atol=self.tolerance )
| 8 | 0 |
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
# Minimal DreamBooth inference script: load a fine-tuned Stable Diffusion
# checkpoint on GPU, generate one image for the trained "sks" prompt token,
# and save it to disk.
# NOTE(review): every binding below reuses the name SCREAMING_SNAKE_CASE, so
# the later reads `model_id`, `pipe`, `prompt` and `image` are unbound, and
# `torch.floataa` is presumably a garbled `torch.float16` -- mechanical-rename
# artifacts; confirm against the original diffusers example before running.
SCREAMING_SNAKE_CASE = 'path-to-your-trained-model'
SCREAMING_SNAKE_CASE = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.floataa).to('cuda')
SCREAMING_SNAKE_CASE = 'A photo of sks dog in a bucket'
# 50 denoising steps, classifier-free guidance scale 7.5 (diffusers defaults).
SCREAMING_SNAKE_CASE = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('dog-bucket.png')
| 707 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
# LayoutXLM processor: combines a LayoutLMv2 image processor (document images,
# optional OCR) with a LayoutXLM tokenizer (text + bounding boxes) behind a
# single __call__, mirroring transformers' LayoutXLMProcessor.
# NOTE(review): the base class is written as `__A` (presumably ProcessorMixin)
# and constructor/__call__ parameters were renamed to `UpperCAmelCase`
# (duplicate-argument SyntaxError); locals bound to `lowercase` leave names
# such as `image_processor`, `features` and `encoded_inputs` unbound.
class UpperCAmelCase_ ( __A ):
    """simple docstring"""
    UpperCamelCase_ = ['''image_processor''', '''tokenizer''']
    UpperCamelCase_ = '''LayoutLMv2ImageProcessor'''
    UpperCamelCase_ = ('''LayoutXLMTokenizer''', '''LayoutXLMTokenizerFast''')
    # Accepts the deprecated `feature_extractor` kwarg as an alias for
    # `image_processor`; both an image processor and a tokenizer are required.
    def __init__( self : List[str] , UpperCAmelCase : Tuple=None , UpperCAmelCase : str=None , **UpperCAmelCase : Dict ) -> Optional[int]:
        '''simple docstring'''
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , UpperCAmelCase , )
            lowercase : Any =kwargs.pop('''feature_extractor''' )
        lowercase : Dict =image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(UpperCAmelCase , UpperCAmelCase )
    # Pipeline: (1) run the image processor (optionally doing OCR), (2) feed
    # the words/boxes -- either caller-supplied or OCR-derived -- to the
    # tokenizer, (3) attach pixel values (re-expanded when overflow produced
    # extra encodings).  Caller-supplied boxes/word_labels are rejected when
    # the image processor already performs OCR.
    def __call__( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , UpperCAmelCase : Union[List[List[int]], List[List[List[int]]]] = None , UpperCAmelCase : Optional[Union[List[int], List[List[int]]]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 0 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : Optional[bool] = None , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = False , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[Union[str, TensorType]] = None , **UpperCAmelCase : Any , ) -> BatchEncoding:
        '''simple docstring'''
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                '''You cannot provide bounding boxes '''
                '''if you initialized the image processor with apply_ocr set to True.''' )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
        # first, apply the image processor
        lowercase : Tuple =self.image_processor(images=UpperCAmelCase , return_tensors=UpperCAmelCase )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(UpperCAmelCase , UpperCAmelCase ):
                lowercase : Optional[Any] =[text]  # add batch dimension (as the image processor always adds a batch dimension)
            lowercase : List[str] =features['''words''']
        lowercase : Optional[Any] =self.tokenizer(
            text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=UpperCAmelCase , add_special_tokens=UpperCAmelCase , padding=UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , stride=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_attention_mask=UpperCAmelCase , return_overflowing_tokens=UpperCAmelCase , return_special_tokens_mask=UpperCAmelCase , return_offsets_mapping=UpperCAmelCase , return_length=UpperCAmelCase , verbose=UpperCAmelCase , return_tensors=UpperCAmelCase , **UpperCAmelCase , )
        # add pixel values
        lowercase : List[str] =features.pop('''pixel_values''' )
        if return_overflowing_tokens is True:
            lowercase : str =self.get_overflowing_images(UpperCAmelCase , encoded_inputs['''overflow_to_sample_mapping'''] )
        lowercase : Dict =images
        return encoded_inputs
    # Repeats each image once per overflowed encoding so images stay aligned
    # with input_ids after truncation/overflow.
    def A__ ( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : str ) -> str:
        '''simple docstring'''
        lowercase : str =[]
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(UpperCAmelCase ) != len(UpperCAmelCase ):
            raise ValueError(
                '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
                f' {len(UpperCAmelCase )} and {len(UpperCAmelCase )}' )
        return images_with_overflow
    # Thin delegations to the underlying tokenizer.
    def A__ ( self : Union[str, Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : Dict ) -> int:
        '''simple docstring'''
        return self.tokenizer.batch_decode(*UpperCAmelCase , **UpperCAmelCase )
    def A__ ( self : Dict , *UpperCAmelCase : Any , **UpperCAmelCase : Dict ) -> str:
        '''simple docstring'''
        return self.tokenizer.decode(*UpperCAmelCase , **UpperCAmelCase )
    @property
    def A__ ( self : List[str] ) -> List[str]:
        '''simple docstring'''
        return ["input_ids", "bbox", "attention_mask", "image"]
    # Deprecated accessors kept for backward compatibility with v4 callers.
    @property
    def A__ ( self : int ) -> List[str]:
        '''simple docstring'''
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , UpperCAmelCase , )
        return self.image_processor_class
    @property
    def A__ ( self : Dict ) -> List[str]:
        '''simple docstring'''
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , UpperCAmelCase , )
        return self.image_processor
| 8 | 0 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# Deprecated alias kept for backward compatibility: warns and defers entirely
# to FlavaImageProcessor (FlavaFeatureExtractor in upstream transformers).
# NOTE(review): `lowerCAmelCase__` (the base class, presumably
# FlavaImageProcessor) and `_SCREAMING_SNAKE_CASE` (presumably FutureWarning
# and the forwarded *args/**kwargs) are not defined in this module --
# mechanical-rename artifacts; confirm against the upstream file.
class UpperCAmelCase_ ( lowerCAmelCase__ ):
    """simple docstring"""
    def __init__( self : int , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Any ) -> None:
        '''simple docstring'''
        warnings.warn(
            '''The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
            ''' use FlavaImageProcessor instead.''' , _SCREAMING_SNAKE_CASE , )
        super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
| 708 |
'''simple docstring'''
def lowercase_ ( __A : int = 6_0_0_8_5_1_4_7_5_1_4_3 ) -> int:
    """Return the largest prime factor of ``__A`` (Project Euler problem 3).

    Args:
        __A: a positive integer, or anything castable to int.  Defaults to
            600851475143, the Euler #3 input (largest prime factor 6857).

    Returns:
        The largest prime factor of the input; 1 when the input is 1.

    Raises:
        TypeError: if the argument cannot be cast to int.
        ValueError: if the argument is not >= 1.
    """
    # Fixes the transcribed version, which bound every intermediate to a
    # variable literally named `lowercase`, read undefined names `n` and `i`,
    # and returned the original input instead of the computed factor.
    try:
        n = int(__A )
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''' )
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''' )
    max_prime = 1
    i = 2
    # Trial division: strip each factor completely; whatever remains above
    # sqrt(original n) at the end is itself prime.
    while i * i <= n:
        while n % i == 0:
            max_prime = i
            n //= i
        i += 1
    if n > 1:
        max_prime = n
    return int(max_prime )
if __name__ == "__main__":
    # The factor function in this module is named `lowercase_`; the original
    # guard called the nonexistent `solution()` and raised NameError.
    print(f"""{lowercase_() = }""")
| 8 | 0 |
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
# Optional serving dependencies: when FastAPI/uvicorn/pydantic are available
# the real symbols are imported; otherwise no-op stand-ins are installed so the
# module still imports, and the flag records that serving is unavailable.
# NOTE(review): the flag, the BaseModel fallback and (above) the logger all
# share the name SCREAMING_SNAKE_CASE -- a mechanical-rename collision kept
# here to preserve behavior.
try:
    from fastapi import Body, FastAPI, HTTPException
    from fastapi.routing import APIRoute
    from pydantic import BaseModel
    from starlette.responses import JSONResponse
    from uvicorn import run
    SCREAMING_SNAKE_CASE = True
except (ImportError, AttributeError):
    SCREAMING_SNAKE_CASE = object
    def lowercase_ ( *__A : Any , **__B : Any ) -> Any:
        """No-op stand-in (for `Body`/`run`) used when serving deps are absent.

        Accepts and ignores any arguments; always returns None.
        """
        # Fixes the transcribed signature, which named both the *args and the
        # **kwargs parameter `__A` -- a duplicate-argument SyntaxError.
        pass
    SCREAMING_SNAKE_CASE = False
# Module-level logger for the serve command.
# NOTE(review): this rebinds SCREAMING_SNAKE_CASE (used just above for the
# deps-installed flag), and the class below reads an undefined `logger` --
# both mechanical-rename collisions.
SCREAMING_SNAKE_CASE = logging.get_logger('transformers-cli/serving')
def lowercase_ ( __A : List[Any] ) -> "ServeCommand":
    """Factory for the `transformers-cli serve` subcommand.

    Args:
        __A: the parsed argparse namespace carrying ``task``, ``model``,
            ``config``, ``tokenizer``, ``device``, ``host``, ``port`` and
            ``workers``.

    Returns:
        A ServeCommand wrapping the requested pipeline.
    """
    # Fixes the transcribed version, which read the undefined names `args`
    # (instead of the parameter) and `A_` (instead of the built pipeline).
    # NOTE(review): `ServeCommand` is not defined under that name in this
    # module (the class was renamed `UpperCAmelCase_`); kept as in the
    # original -- confirm against the upstream serving command.
    nlp = pipeline(
        task=__A.task ,
        model=__A.model if __A.model else None ,
        config=__A.config ,
        tokenizer=__A.tokenizer ,
        device=__A.device ,
    )
    return ServeCommand(nlp , __A.host , __A.port , __A.workers )
# Response model for GET / (originally ServeModelInfoResult: the model's
# config as a dict).  NOTE(review): base `_A` (presumably pydantic BaseModel)
# is undefined and the typed field was collapsed to `42` by the renamer.
class UpperCAmelCase_ ( _A ):
    """simple docstring"""
    UpperCamelCase_ = 42
# Response model for POST /tokenize (originally ServeTokenizeResult: tokens
# and optional token ids).  NOTE(review): base `_A` is undefined and both
# typed fields were collapsed to `42` by the renamer.
class UpperCAmelCase_ ( _A ):
    """simple docstring"""
    UpperCamelCase_ = 42
    UpperCamelCase_ = 42
# Response model for POST /detokenize (originally ServeDeTokenizeResult).
# NOTE(review): base `_A` is undefined; field collapsed to `42`.
class UpperCAmelCase_ ( _A ):
    """simple docstring"""
    UpperCamelCase_ = 42
# Response model for POST /forward (originally ServeForwardResult).
# NOTE(review): base `_A` is undefined; field collapsed to `42`.
class UpperCAmelCase_ ( _A ):
    """simple docstring"""
    UpperCamelCase_ = 42
# The `transformers-cli serve` command: hosts a pipeline behind four FastAPI
# routes (/, /tokenize, /detokenize, /forward) served by uvicorn.
# NOTE(review): mechanical-rename artifacts throughout -- base `_A`
# (presumably BaseTransformersCLICommand), sentinel `UpperCamelCase__`
# (presumably the argparse `str`/`int` types and Body defaults), locals bound
# to `lowercase` leaving `parser`/`serve_parser` etc. unbound, and the
# ServeXxxResult classes referenced below were all renamed `UpperCAmelCase_`.
class UpperCAmelCase_ ( _A ):
    """simple docstring"""
    # Registers the `serve` sub-parser and its CLI flags.
    @staticmethod
    def A__ ( UpperCAmelCase : ArgumentParser ) -> Union[str, Any]:
        '''simple docstring'''
        lowercase : List[str] =parser.add_parser(
            '''serve''' , help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' )
        serve_parser.add_argument(
            '''--task''' , type=UpperCamelCase__ , choices=get_supported_tasks() , help='''The task to run the pipeline on''' , )
        serve_parser.add_argument('''--host''' , type=UpperCamelCase__ , default='''localhost''' , help='''Interface the server will listen on.''' )
        serve_parser.add_argument('''--port''' , type=UpperCamelCase__ , default=8888 , help='''Port the serving will listen to.''' )
        serve_parser.add_argument('''--workers''' , type=UpperCamelCase__ , default=1 , help='''Number of http workers''' )
        serve_parser.add_argument('''--model''' , type=UpperCamelCase__ , help='''Model\'s name or path to stored model.''' )
        serve_parser.add_argument('''--config''' , type=UpperCamelCase__ , help='''Model\'s config name or path to stored model.''' )
        serve_parser.add_argument('''--tokenizer''' , type=UpperCamelCase__ , help='''Tokenizer name to use.''' )
        serve_parser.add_argument(
            '''--device''' , type=UpperCamelCase__ , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , )
        serve_parser.set_defaults(func=UpperCamelCase__ )
    # Stores the pipeline/host/port/workers and builds the FastAPI app with
    # the four routes; raises if serving deps are missing.
    def __init__( self : Optional[Any] , UpperCAmelCase : Pipeline , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : int ) -> Tuple:
        '''simple docstring'''
        lowercase : List[str] =pipeline
        lowercase : str =host
        lowercase : Optional[Any] =port
        lowercase : Optional[Any] =workers
        if not _serve_dependencies_installed:
            raise RuntimeError(
                '''Using serve command requires FastAPI and uvicorn. '''
                '''Please install transformers with [serving]: pip install \"transformers[serving]\".'''
                '''Or install FastAPI and uvicorn separately.''' )
        else:
            logger.info(f'Serving model over {host}:{port}' )
            lowercase : Optional[Any] =FastAPI(
                routes=[
                    APIRoute(
                        '''/''' , self.model_info , response_model=UpperCamelCase__ , response_class=UpperCamelCase__ , methods=['''GET'''] , ),
                    APIRoute(
                        '''/tokenize''' , self.tokenize , response_model=UpperCamelCase__ , response_class=UpperCamelCase__ , methods=['''POST'''] , ),
                    APIRoute(
                        '''/detokenize''' , self.detokenize , response_model=UpperCamelCase__ , response_class=UpperCamelCase__ , methods=['''POST'''] , ),
                    APIRoute(
                        '''/forward''' , self.forward , response_model=UpperCamelCase__ , response_class=UpperCamelCase__ , methods=['''POST'''] , ),
                ] , timeout=600 , )
    # Starts the uvicorn server (blocking).
    def A__ ( self : Tuple ) -> str:
        '''simple docstring'''
        run(self._app , host=self.host , port=self.port , workers=self.workers )
    # GET /: the model configuration as a dict.
    def A__ ( self : int ) -> List[Any]:
        '''simple docstring'''
        return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
    # POST /tokenize: text -> tokens (and optionally token ids); 500 on error.
    def A__ ( self : List[Any] , UpperCAmelCase : str = Body(UpperCamelCase__ , embed=UpperCamelCase__ ) , UpperCAmelCase : bool = Body(UpperCamelCase__ , embed=UpperCamelCase__ ) ) -> int:
        '''simple docstring'''
        try:
            lowercase : List[str] =self._pipeline.tokenizer.tokenize(UpperCamelCase__ )
            if return_ids:
                lowercase : Optional[int] =self._pipeline.tokenizer.convert_tokens_to_ids(UpperCamelCase__ )
                return ServeTokenizeResult(tokens=UpperCamelCase__ , tokens_ids=UpperCamelCase__ )
            else:
                return ServeTokenizeResult(tokens=UpperCamelCase__ )
        except Exception as e:
            raise HTTPException(status_code=500 , detail={'''model''': '''''', '''error''': str(UpperCamelCase__ )} )
    # POST /detokenize: token ids -> text; 500 on error.
    def A__ ( self : List[Any] , UpperCAmelCase : List[int] = Body(UpperCamelCase__ , embed=UpperCamelCase__ ) , UpperCAmelCase : bool = Body(UpperCamelCase__ , embed=UpperCamelCase__ ) , UpperCAmelCase : bool = Body(UpperCamelCase__ , embed=UpperCamelCase__ ) , ) -> List[Any]:
        '''simple docstring'''
        try:
            lowercase : str =self._pipeline.tokenizer.decode(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
            return ServeDeTokenizeResult(model='''''' , text=UpperCamelCase__ )
        except Exception as e:
            raise HTTPException(status_code=500 , detail={'''model''': '''''', '''error''': str(UpperCamelCase__ )} )
    # POST /forward: run the pipeline on the inputs; empty inputs short-circuit.
    async def A__ ( self : List[str] , UpperCAmelCase : Optional[Any]=Body(UpperCamelCase__ , embed=UpperCamelCase__ ) ) -> Dict:
        '''simple docstring'''
        if len(UpperCamelCase__ ) == 0:
            return ServeForwardResult(output=[] , attention=[] )
        try:
            # Forward through the model
            lowercase : Any =self._pipeline(UpperCamelCase__ )
            return ServeForwardResult(output=UpperCamelCase__ )
        except Exception as e:
            raise HTTPException(500 , {'''error''': str(UpperCamelCase__ )} )
| 709 |
'''simple docstring'''
from __future__ import annotations
import math
def lowercase_ ( __A : float , __B : int ) -> float:
    """Return u * (u-1) * ... * (u-p+1), the falling product used by Newton's
    forward-difference interpolation formula (`ucal` upstream).

    Args:
        __A: the normalised interpolation variable ``u``.
        __B: the term index ``p``; the product contains ``p`` factors.

    Returns:
        The product as a float (``u`` itself when ``__B`` <= 1).
    """
    # Fixes the transcribed version, which declared both parameters as `__A`
    # (a duplicate-argument SyntaxError) and referenced the undefined names
    # `u` and `temp`.
    temp = __A
    for i in range(1 , __B ):
        temp = temp * (__A - i)
    return temp
def lowercase_ ( ) -> None:
    """Interactively perform Newton's forward-difference interpolation.

    Reads ``n``, the x values, and the corresponding y values from stdin,
    builds the forward difference table in place, and prints the interpolated
    value at the requested point.  All input comes from stdin; the result is
    printed rather than returned.
    """
    # Fixes the transcribed version, which bound every intermediate to a
    # variable literally named `lowercase` and then read the undefined names
    # `n`, `x`, `value`, `u`, `summ` and `ucal`.
    n = int(input('''enter the numbers of values: ''' ) )
    y: list[list[float]] = []
    for _ in range(n ):
        y.append([] )
    # Pre-fill the n x n difference table; columns > 0 are overwritten below.
    for i in range(n ):
        for j in range(n ):
            y[i].append(0.0 )
    print('''enter the values of parameters in a list: ''' )
    x = list(map(int , input().split() ) )
    print('''enter the values of corresponding parameters: ''' )
    for i in range(n ):
        y[i][0] = float(input() )
    value = int(input('''enter the value to interpolate: ''' ) )
    # Normalised variable u assumes equally spaced x values.
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , n ):
        for j in range(n - i ):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    # Newton's forward formula: f ~= y0 + sum_i ucal(u, i) * delta^i y0 / i!
    summ = y[0][0]
    for i in range(1 , n ):
        # Inlined ucal product u*(u-1)*...*(u-i+1), so this function does not
        # depend on the sibling helper (which this def's name shadows).
        term = u
        for k in range(1 , i ):
            term = term * (u - k)
        summ += (term * y[0][i]) / math.factorial(i )
    print(F'the value at {value} is {summ}' )
if __name__ == "__main__":
    # The interactive entry point in this module is named `lowercase_`; the
    # original guard called the nonexistent `main()` and raised NameError.
    lowercase_()
| 8 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import scaffolding for the deprecated MMBT model: type checkers see the
# real imports; at runtime the module is replaced by a _LazyModule.
# NOTE(review): both the import-structure dict and the torch-only class list
# were renamed to SCREAMING_SNAKE_CASE (the second assignment clobbers the
# first), and the `_import_structure` read in the last line is undefined --
# mechanical-rename artifacts; confirm against the upstream __init__.py.
SCREAMING_SNAKE_CASE = {'''configuration_mmbt''': ['''MMBTConfig''']}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings''']
if TYPE_CHECKING:
    from .configuration_mmbt import MMBTConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
    import sys
    SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 710 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
    # setUp-style hook.  NOTE(review): the 0 is bound to a throwaway local
    # named `lowercase` rather than an attribute on `self`, so this method has
    # no observable effect -- a mechanical-rename artifact.
    def A__ ( self : int ) -> Any:
        '''simple docstring'''
        lowercase : Union[str, Any] =0
    # Loads the CLIP image processor from the hub and checks its type.
    # NOTE(review): `UpperCAmelCase` is undefined in this scope (presumably the
    # loaded processor and CLIPImageProcessor) -- mechanical-rename artifact.
    def A__ ( self : str ) -> Union[str, Any]:
        '''simple docstring'''
        lowercase : Tuple =AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
        self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Any ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
lowercase : str =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : int =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : Optional[Any] ) -> Any:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : str =CLIPConfig()
# Create a dummy config file with image_proceesor_type
lowercase : Optional[int] =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : Optional[Any] =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase ).to_dict()
config_dict.pop('''image_processor_type''' )
lowercase : str =CLIPImageProcessor(**UpperCAmelCase )
# save in new folder
model_config.save_pretrained(UpperCAmelCase )
config.save_pretrained(UpperCAmelCase )
lowercase : Optional[int] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
# make sure private variable is not incorrectly saved
lowercase : int =json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : str ) -> List[str]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Dict =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
def A__ ( self : int ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , '''clip-base is not a local folder and is not a valid model identifier''' ):
lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained('''clip-base''' )
def A__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , revision='''aaaaaa''' )
def A__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
with self.assertRaisesRegex(
UpperCAmelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
lowercase : Optional[int] =AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def A__ ( self : List[str] ) -> str:
'''simple docstring'''
with self.assertRaises(UpperCAmelCase ):
lowercase : Dict =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(UpperCAmelCase ):
lowercase : List[str] =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCAmelCase )
lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , trust_remote_code=UpperCAmelCase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
try:
AutoConfig.register('''custom''' , UpperCAmelCase )
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(UpperCAmelCase ):
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase : Any =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
lowercase : Optional[int] =CustomImageProcessor.from_pretrained(UpperCAmelCase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(UpperCAmelCase )
lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase )
self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def A__ ( self : Any ) -> Any:
'''simple docstring'''
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = True
try:
AutoConfig.register('''custom''' , UpperCAmelCase )
AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
# If remote code is not set, the default is to use local
lowercase : List[str] =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
lowercase : Tuple =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
lowercase : Dict =AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(UpperCAmelCase , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 8 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( unittest.TestCase ):
    """Slow integration test for the TF mT5 model (google/mt5-small)."""

    @slow
    def A__ ( self : Dict ) -> Optional[int]:
        """Check the mean LM loss of mt5-small on a fixed input against a
        recorded reference value (tolerance 2e-4).

        NOTE(review): obfuscated source — every result is bound to the
        throwaway name ``lowercase`` while later lines read ``tokenizer``,
        ``mtf_score`` and ``EXPECTED_SCORE``, and the model call uses the
        undefined name ``__a``; the test cannot run as written.
        """
        lowercase : Dict =TFAutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' )
        lowercase : str =AutoTokenizer.from_pretrained('''google/mt5-small''' )
        lowercase : List[Any] =tokenizer('''Hello there''' , return_tensors='''tf''' ).input_ids
        lowercase : Union[str, Any] =tokenizer('''Hi I am''' , return_tensors='''tf''' ).input_ids
        lowercase : List[str] =model(__a , labels=__a ).loss
        lowercase : Optional[Any] =-tf.math.reduce_mean(__a ).numpy()
        lowercase : str =-2_1.2_2_8_1_6_8
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
| 711 |
'''simple docstring'''
from __future__ import annotations
COULOMBS_CONSTANT = 8.988E9  # Coulomb's constant k, units = N * m^2 * C^-2


def lowercase_(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """Solve Coulomb's law ``F = k * |q1 * q2| / d**2`` for the zero argument.

    Exactly one of the four arguments must be 0; that quantity is computed
    from the other three and returned as a single-entry dict keyed by its
    name ("force", "charge1", "charge2" or "distance").

    Args:
        force: Electrostatic force in newtons (0 to solve for it).
        charge1: First charge in coulombs (0 to solve for it).
        charge2: Second charge in coulombs (0 to solve for it).
        distance: Separation in metres (0 to solve for it).

    Raises:
        ValueError: If not exactly one argument is 0, or if distance < 0.

    NOTE: the original signature declared all four parameters as ``__A``
    (a SyntaxError: duplicate argument names) and the body read undefined
    names; the upstream parameter/constant names are restored here.
    """
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if distance < 0:
        raise ValueError('Distance cannot be negative')
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError('Exactly one argument must be 0')


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 8 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
# Lazy-import structure: maps submodule name -> list of public names it
# provides. Consumed by _LazyModule below so heavy deps load on first use.
# NOTE: the original reassigned a single throwaway name for every section
# (losing the configuration entry) and passed an undefined
# `_import_structure` to _LazyModule; restored here.
_import_structure = {
    'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Vision extras available: expose the image processing classes.
    _import_structure['feature_extraction_convnext'] = ['ConvNextFeatureExtractor']
    _import_structure['image_processing_convnext'] = ['ConvNextImageProcessor']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # PyTorch available: expose the PT modeling classes.
    _import_structure['modeling_convnext'] = [
        'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ConvNextForImageClassification',
        'ConvNextModel',
        'ConvNextPreTrainedModel',
        'ConvNextBackbone',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # TensorFlow available: expose the TF modeling classes.
    _import_structure['modeling_tf_convnext'] = [
        'TFConvNextForImageClassification',
        'TFConvNextModel',
        'TFConvNextPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    # Install the lazy proxy in place of this module.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 712 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
# NOTE: the original bound all four of these constants to one throwaway name,
# clobbering each in turn, while the code below reads `logger`,
# `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`; the real names are restored.
logger = logging.get_logger(__name__)

# Filenames used when saving/loading the vocabulary.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
    },
    'merges_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
    },
}

# Maximum input length supported by each pretrained checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'allenai/led-base-16384': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode() -> Dict[int, str]:
    """Map every byte value (0-255) to a printable unicode character.

    Printable latin-1 byte values map to themselves; all other bytes are
    remapped to code points 256 and above, so no BPE symbol is whitespace or
    a control character.

    NOTE: the original body bound every intermediate to a throwaway
    `lowercase` name, leaving `bs`, `cs` and `n` undefined (NameError), and
    the function was named `lowercase_` — a name immediately shadowed by the
    next def — while the tokenizer class calls `bytes_to_unicode()`; the
    upstream name is restored. The `-> Any` annotation also referenced an
    unimported name.
    """
    # Byte ranges that are already printable and map to themselves.
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            # Non-printable byte: map it to the next free code point >= 256.
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word) -> set:
    """Return the set of adjacent symbol pairs in `word`.

    `word` is a sequence of symbols (variable-length strings); consecutive
    pairs are collected for BPE merge ranking.

    NOTE: the original body bound its locals to a throwaway `lowercase` name
    while reading `pairs`/`prev_char` (NameError), and the function was named
    `lowercase_` although the tokenizer class calls `get_pairs(...)`; the
    upstream name is restored.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class UpperCAmelCase_(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for LED models (GPT-2 style, identical to the
    BART tokenizer) plus LED-specific padding of ``global_attention_mask``.

    Args:
        vocab_file: Path to the token -> id vocabulary JSON file.
        merges_file: Path to the BPE merges file.
        errors: How to handle undecodable bytes (passed to ``bytes.decode``).
        bos/eos/sep/cls/unk/pad/mask_token: Special tokens; plain strings are
            wrapped in ``AddedToken``.
        add_prefix_space: Whether to prepend a space so the first word is
            tokenized like any other word.

    NOTE(review): restored from an obfuscated source in which the base class
    was the undefined ``__A``, all four class attributes and all methods
    shared a single name (only the last binding survived), several ``def``
    headers had duplicate parameter names (a SyntaxError), and every local was
    bound to a throwaway name. Names follow the upstream BART/LED tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors='replace',
        bos_token='<s>',
        eos_token='</s>',
        sep_token='</s>',
        cls_token='<s>',
        unk_token='<unk>',
        pad_token='<pad>',
        mask_token='<mask>',
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding='utf-8') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='utf-8') as merges_handle:
            bpe_merges = merges_handle.read().split('\n')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''')

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        """Size of the base vocabulary (excluding added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Full vocabulary (base + added tokens) as a token -> id dict."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply the ranked byte-pair merges to `token`; returns the merged
        symbols joined by spaces (memoized in ``self.cache``)."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (most frequent) adjacent pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split `text` with the GPT-2 regex, byte-encode each piece, BPE it."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8')
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Token string -> vocabulary id (unk id if missing)."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Vocabulary id -> token string."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Join BPE tokens and byte-decode back to a regular string."""
        text = ''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into `save_directory`."""
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file']
        )

        with open(vocab_file, 'w', encoding='utf-8') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '\n')

        index = 0
        with open(merge_file, 'w', encoding='utf-8') as writer:
            writer.write('#version: 0.2\n')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ' Please check that the tokenizer is not corrupted!'
                    )
                    index = token_index
                writer.write(' '.join(bpe_tokens) + '\n')
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """``<s> A </s>`` for one sequence, ``<s> A </s></s> B </s>`` for a pair."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """LED (like BART) does not use token type ids: all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word is BPE'd like any other."""
        add_prefix_space = kwargs.pop('add_prefix_space', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ' ' + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """Pad as usual, then pad ``global_attention_mask`` with -1 to match."""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = 'attention_mask' in self.model_input_names

        if return_attention_mask and 'global_attention_mask' in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['global_attention_mask']) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs['global_attention_mask'])

                if self.padding_side == 'right':
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['global_attention_mask'] = (
                        encoded_inputs['global_attention_mask'] + [-1] * difference
                    )
                elif self.padding_side == 'left':
                    encoded_inputs['global_attention_mask'] = [-1] * difference + encoded_inputs[
                        'global_attention_mask'
                    ]
                else:
                    raise ValueError('Invalid padding strategy:' + str(self.padding_side))

        return encoded_inputs
| 8 | 0 |
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    """Decoding vocabulary types supported by the MGP-STR processor.

    NOTE(review): restored from an obfuscated source that derived this class
    from the undefined name ``__lowerCamelCase`` (while ``ExplicitEnum`` is
    imported above and unused), collapsed all three members onto a single
    attribute name, and named the class ``UpperCAmelCase_`` — a name that is
    immediately shadowed by the processor class below, while the rest of the
    module references ``DecodeType.CHARACTER``/``.BPE``/``.WORDPIECE``.
    """

    CHARACTER = 'char'
    BPE = 'bpe'
    WORDPIECE = 'wp'


# Tuple of all decode types accepted by the processor.
SCREAMING_SNAKE_CASE = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class UpperCAmelCase_ ( __lowerCamelCase ):
    """Processor bundling a ViT image processor with the MGP-STR character
    tokenizer plus GPT-2 (BPE) and BERT (wordpiece) tokenizers; decodes model
    logits by picking, per sample, the vocabulary with the highest confidence.

    NOTE(review): obfuscated source — the base class ``__lowerCamelCase`` is
    undefined, every method is named ``A__`` (only the last binding survives),
    the class attributes share one name, locals are bound to a throwaway
    ``lowercase`` name, and call arguments use the undefined
    ``UpperCamelCase_``; the class cannot work as written.
    """

    UpperCamelCase_ = ['image_processor', 'char_tokenizer']
    UpperCamelCase_ = 'ViTImageProcessor'
    UpperCamelCase_ = 'MgpstrTokenizer'
    def __init__( self : Union[str, Any] , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Any=None , **UpperCAmelCase : Union[str, Any] ) -> str:
        '''Wrap image processor + char tokenizer; accepts the deprecated
        `feature_extractor` kwarg and builds the auxiliary gpt2/bert tokenizers.'''
        lowercase : Union[str, Any] =None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , UpperCamelCase_ , )
            lowercase : str =kwargs.pop('''feature_extractor''' )
        lowercase : List[str] =image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        lowercase : Dict =tokenizer
        lowercase : str =AutoTokenizer.from_pretrained('''gpt2''' )
        lowercase : Optional[Any] =AutoTokenizer.from_pretrained('''bert-base-uncased''' )
        super().__init__(UpperCamelCase_ , UpperCamelCase_ )
    def __call__( self : Optional[Any] , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : Tuple=None , UpperCAmelCase : Optional[Any]=None , **UpperCAmelCase : Union[str, Any] ) -> Dict:
        '''Process images and/or text; merges the char-tokenizer ids into the
        image-processor output when both are given.'''
        if images is None and text is None:
            raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
        if images is not None:
            lowercase : Dict =self.image_processor(UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ )
        if text is not None:
            lowercase : Tuple =self.char_tokenizer(UpperCamelCase_ , return_tensors=UpperCamelCase_ , **UpperCamelCase_ )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            lowercase : List[str] =encodings['''input_ids''']
            return inputs
    def A__ ( self : Dict , UpperCAmelCase : Any ) -> List[Any]:
        '''Decode (char, bpe, wp) logit triples; per sample keep the decoding
        with the highest confidence score and return all candidates too.'''
        lowercase , lowercase , lowercase : List[str] =sequences
        lowercase : Any =char_preds.size(0 )
        lowercase , lowercase : Any =self._decode_helper(UpperCamelCase_ , '''char''' )
        lowercase , lowercase : Optional[Any] =self._decode_helper(UpperCamelCase_ , '''bpe''' )
        lowercase , lowercase : Tuple =self._decode_helper(UpperCamelCase_ , '''wp''' )
        lowercase : Any =[]
        lowercase : Optional[int] =[]
        for i in range(UpperCamelCase_ ):
            lowercase : Union[str, Any] =[char_scores[i], bpe_scores[i], wp_scores[i]]
            lowercase : Optional[int] =[char_strs[i], bpe_strs[i], wp_strs[i]]
            lowercase : List[str] =scores.index(max(UpperCamelCase_ ) )
            final_strs.append(strs[max_score_index] )
            final_scores.append(scores[max_score_index] )
        lowercase : Any ={}
        lowercase : str =final_strs
        lowercase : Optional[int] =final_scores
        lowercase : List[str] =char_strs
        lowercase : str =bpe_strs
        lowercase : List[Any] =wp_strs
        return out
    def A__ ( self : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple ) -> Optional[int]:
        '''Greedy-decode logits for one vocabulary ("char"/"bpe"/"wp"): take the
        argmax per step, cut at the EOS marker, and return (strings, cumulative
        confidence scores).'''
        if format == DecodeType.CHARACTER:
            lowercase : Any =self.char_decode
            lowercase : int =1
            lowercase : Any ='''[s]'''
        elif format == DecodeType.BPE:
            lowercase : Dict =self.bpe_decode
            lowercase : int =2
            lowercase : Any ='''#'''
        elif format == DecodeType.WORDPIECE:
            lowercase : Union[str, Any] =self.wp_decode
            lowercase : List[str] =102
            lowercase : str ='''[SEP]'''
        else:
            raise ValueError(f'Format {format} is not supported.' )
        lowercase , lowercase : Any =[], []
        lowercase : List[str] =pred_logits.size(0 )
        lowercase : List[Any] =pred_logits.size(1 )
        lowercase , lowercase : Tuple =pred_logits.topk(1 , dim=-1 , largest=UpperCamelCase_ , sorted=UpperCamelCase_ )
        lowercase : str =preds_index.view(-1 , UpperCamelCase_ )[:, 1:]
        lowercase : Optional[int] =decoder(UpperCamelCase_ )
        lowercase , lowercase : List[Any] =torch.nn.functional.softmax(UpperCamelCase_ , dim=2 ).max(dim=2 )
        lowercase : int =preds_max_prob[:, 1:]
        for index in range(UpperCamelCase_ ):
            lowercase : int =preds_str[index].find(UpperCamelCase_ )
            lowercase : List[str] =preds_str[index][:pred_eos]
            lowercase : str =preds_index[index].cpu().tolist()
            lowercase : Optional[Any] =pred_index.index(UpperCamelCase_ ) if eos_token in pred_index else -1
            lowercase : int =preds_max_prob[index][: pred_eos_index + 1]
            lowercase : Optional[Any] =pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(UpperCamelCase_ )
            conf_scores.append(UpperCamelCase_ )
        return dec_strs, conf_scores
    def A__ ( self : Dict , UpperCAmelCase : Dict ) -> List[str]:
        '''Batch-decode ids with the char tokenizer and strip spaces.'''
        lowercase : Any =[seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(UpperCamelCase_ )]
        return decode_strs
    def A__ ( self : int , UpperCAmelCase : Any ) -> str:
        '''Batch-decode ids with the gpt2 (BPE) tokenizer.'''
        return self.bpe_tokenizer.batch_decode(UpperCamelCase_ )
    def A__ ( self : List[Any] , UpperCAmelCase : Any ) -> Any:
        '''Batch-decode ids with the bert (wordpiece) tokenizer, stripping spaces.'''
        lowercase : Optional[int] =[seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(UpperCamelCase_ )]
        return decode_strs
| 713 |
'''simple docstring'''
import cmath  # stdlib complex exponential, used for the FFT root of unity

import mpmath  # for roots of unity
import numpy as np
class UpperCAmelCase_:
    """Multiply two polynomials with the radix-2 fast Fourier transform.

    Coefficients are given lowest-degree first; the (complex, rounded)
    product coefficients are stored in ``self.product`` after construction.

    NOTE(review): restored from an obfuscated source in which every local and
    attribute was bound to a throwaway ``lowercase`` name (leaving ``dft``,
    ``new_dft``, ``self.len_A`` etc. undefined), ``np.loga`` was called
    instead of ``np.log2``, and ``__str__`` unpacked ``enumerate`` as
    ``coef, i`` (swapped). ``mpmath.root(1, n, 1)`` is replaced by the
    equivalent stdlib ``cmath.exp(2j*pi/n)``; results are rounded to 8
    decimals, so the numeric output is unchanged.
    """

    def __init__(self, poly_a=None, poly_b=None):
        """Store trimmed, zero-padded coefficient lists and compute the product."""
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Pad with zeros up to the next power of two large enough to hold the
        # full product (len_A + len_B - 1 coefficients).
        self.c_max_length = int(2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # Primitive c_max_length-th complex root of unity for the transform.
        self.root = cmath.exp(2j * cmath.pi / self.c_max_length)

        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        """Return the DFT of polyA (which == 'A') or polyB, computed iteratively."""
        dft = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
        # Corner case: a constant polynomial is its own transform.
        if len(dft) <= 1:
            return dft[0]

        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for _ in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        """Pointwise-multiply the two DFTs, invert the transform, and return
        the (rounded) product coefficients with trailing zeros removed."""
        dft_a = self.__dft('A')
        dft_b = self.__dft('B')
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case: a single-point transform needs no inversion.
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]

        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for _ in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol]) / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol]) / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2

        # Unpack and round away floating-point noise.
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove trailing zero coefficients of the product.
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__(self):
        """Human-readable dump of both inputs and the product, one per line."""
        a = 'A = ' + ' + '.join(f'{coef}*x^{i}' for i, coef in enumerate(self.polyA[: self.len_A]))
        b = 'B = ' + ' + '.join(f'{coef}*x^{i}' for i, coef in enumerate(self.polyB[: self.len_B]))
        c = 'A*B = ' + ' + '.join(f'{coef}*x^{i}' for i, coef in enumerate(self.product))
        return f'{a}\n{b}\n{c}'


# Unit tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 8 | 0 |
'''simple docstring'''
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class UpperCAmelCase_(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    """Formatter that converts Arrow data into (possibly nested) PyTorch tensors.

    Review restoration: in the original every assignment target was renamed to
    ``lowercase`` and every method to ``A__`` (so they shadowed each other)
    while the call sites still used the intended names
    (``self._consolidate`` etc.), and the torch dtypes were misspelled
    (``torch.intaa`` / ``torch.floataa``). The intended names are restored.
    """

    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        # Extra keyword arguments forwarded to every torch.tensor(...) call.
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa: F401 - fail fast at init time if torch is missing

    def _consolidate(self, column):
        """Stack a list of same-shape/same-dtype tensors into one tensor."""
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        """Convert one leaf value to a torch tensor (or leave it untouched)."""
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)
        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        """Tensorize an arbitrarily nested structure of values."""
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: "pa.Table") -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: "pa.Table") -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: "pa.Table") -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
| 714 |
'''simple docstring'''
def lowercase_(number: int, shift_amount: int) -> str:
    """Logical left shift: append ``shift_amount`` zero bits to ``number``.

    Returns the result as a ``0b``-prefixed binary string.
    Raises ValueError if either input is negative.

    Review fix: both parameters were declared ``__A`` (duplicate argument
    name, a SyntaxError) while the body read ``number``/``shift_amount``.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))
    binary_number += "0" * shift_amount
    return binary_number
def lowercase_(number: int, shift_amount: int) -> str:
    """Logical right shift: drop the lowest ``shift_amount`` bits of ``number``.

    Returns a ``0b``-prefixed binary string ("0b0" when every bit is shifted
    out). Raises ValueError if either input is negative.

    Review fix: both parameters were declared ``__A`` (duplicate argument
    name, a SyntaxError) while the body read ``number``/``shift_amount``.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")
    binary_number = str(bin(number))[2:]
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number
def lowercase_(number: int, shift_amount: int) -> str:
    """Arithmetic right shift of ``number`` (two's-complement aware).

    The sign bit is replicated into the vacated positions, so negative
    numbers stay negative. Returns a ``0b``-prefixed bit string.

    Review fix: both parameters were declared ``__A`` (duplicate argument
    name, a SyntaxError) while the body read ``number``/``shift_amount``.
    """
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )
    if shift_amount >= len(binary_number):
        # Everything shifted out: result is all copies of the sign bit.
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 8 | 0 |
'''simple docstring'''
from manim import *
class UpperCAmelCase_(Scene):
    """Manim scene: stage 1 of a big-model-inference animation.

    Draws CPU / GPU / Model memory blocks, then animates empty-model "meta"
    markers flowing onto the CPU grid.

    Review restoration: the original inherited from the undefined name
    ``__A`` (restored to manim's ``Scene``, which provides the
    ``play``/``add``/``wait`` methods used below), bound every assignment to
    a variable literally named ``lowercase`` and every positional constant
    to the undefined ``lowercase_``. Names and direction constants below are
    reconstructed from the surviving read sites (``cpu``, ``gpu``, ``model``,
    ``cpu_left_col_base``, ``cpu_targs`` ...) — confirm against the upstream
    animation before rendering.
    """

    def construct(self):  # entry point invoked by Scene.render
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)  # noqa: F841 - kept from original

        # CPU: two 6-cell columns with a label underneath.
        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        # GPU: a single cell, aligned with the CPU block.
        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        # Model: a horizontal row of 6 cells.
        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.play(
            Create(cpu, run_time=1), Create(gpu, run_time=1), Create(model, run_time=1),
        )

        step_a = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        step_a.move_to([2, 2, 0])
        self.play(Write(step_a, run_time=2.5), Write(key_text), Write(key))
        self.add(key_text)

        # Animate each model cell turning yellow and a shrunken copy flying
        # onto the CPU grid.
        first_animations = []
        second_animations = []
        cpu_targs = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)
            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
| 715 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class UpperCAmelCase_ :
"""simple docstring"""
@staticmethod
def A__ ( *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
pass
def lowercase_(__A):
    """Placeholder image-hash function: ignores its input and returns None.

    Review fix: the original annotations referenced ``Union``/``List``
    without importing them, which raises NameError at definition time.
    """
    return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
SCREAMING_SNAKE_CASE = (
'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase_(unittest.TestCase):
    """Pipeline tests for ``document-question-answering``.

    Review restoration: in the original every method was named ``A__`` (so
    they shadowed one another and unittest could not discover any test) and
    every local was bound to a variable literally named ``lowercase`` while
    later statements read the intended names (``dqa_pipeline``, ``image``,
    ``question`` ...). Names are reconstructed from those read sites and the
    decorators.
    """

    # Read by the pipeline-test mixin to know which model classes to exercise.
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )

    @require_torch
    @require_detectrona
    @require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])

    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.4251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0819, "answer": "1110212019", "start": 23, "end": 23},
            ],
        )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2,
        )

        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9998, "answer": "us-001", "start": 16, "end": 16},
            ],
        )

    @slow
    @require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])

    @require_tf
    @unittest.skip("Document question answering not implemented in TF")
    def test_small_model_tf(self):
        pass
| 8 | 0 |
'''simple docstring'''
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class UpperCAmelCase_(AbstractDatasetReader):
    """Dataset reader that builds a dataset from a Spark DataFrame.

    Review fixes: the original ``__init__`` declared every parameter as
    ``UpperCAmelCase`` (duplicate argument names, a SyntaxError), assigned
    all state to a local called ``lowercase`` while the read sites used
    ``self._load_from_cache_file`` / ``self._file_format`` / ``self.builder``,
    and inherited from an undefined name; the intended names are restored.
    """

    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        streaming: bool = True,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        working_dir: str = None,
        load_from_cache_file: bool = True,
        file_format: str = "arrow",
        **kwargs,
    ):
        super().__init__(
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            **kwargs,
        )
        self._load_from_cache_file = load_from_cache_file
        self._file_format = file_format
        self.builder = Spark(
            df=df,
            features=features,
            cache_dir=cache_dir,
            working_dir=working_dir,
            **kwargs,
        )

    def A__(self):
        """Build and return the dataset (streaming or fully materialized)."""
        if self.streaming:
            return self.builder.as_streaming_dataset(split=self.split)
        download_mode = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
        self.builder.download_and_prepare(
            download_mode=download_mode,
            file_format=self._file_format,
        )
        return self.builder.as_dataset(split=self.split)
| 716 |
'''simple docstring'''
def lowercase_(number: float, digit_amount: int) -> float:
    """Isolate the fractional (decimal) part of ``number``.

    With ``digit_amount > 0`` the fractional part is rounded to that many
    digits; otherwise the raw fractional part is returned.

    Review fix: both parameters were declared ``__A`` (duplicate argument
    name, a SyntaxError) while the body read ``number``/``digit_amount``.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 8 | 0 |
'''simple docstring'''
def lowercase_(number: int, position: int) -> int:
    """Return ``number`` with the bit at ``position`` set to 1.

    Review fix: both parameters were declared ``__A`` (duplicate argument
    name, a SyntaxError) while the body read ``number``/``position``.
    """
    return number | (1 << position)
def lowercase_(number: int, position: int) -> int:
    """Return ``number`` with the bit at ``position`` cleared to 0.

    Review fix: both parameters were declared ``__A`` (duplicate argument
    name, a SyntaxError) while the body read ``number``/``position``.
    """
    return number & ~(1 << position)
def lowercase_(number: int, position: int) -> int:
    """Return ``number`` with the bit at ``position`` flipped.

    Review fix: both parameters were declared ``__A`` (duplicate argument
    name, a SyntaxError) while the body read ``number``/``position``.
    """
    return number ^ (1 << position)
def lowercase_(number: int, position: int) -> bool:
    """Return True if the bit at ``position`` of ``number`` is set.

    Review fix: both parameters were declared ``__A`` (duplicate argument
    name, a SyntaxError) while the body read ``number``/``position``.
    """
    return ((number >> position) & 1) == 1
def lowercase_(number: int, position: int) -> int:
    """Return the bit (0 or 1) at ``position`` of ``number``.

    Review fix: both parameters were declared ``__A`` (duplicate argument
    name, a SyntaxError) while the body read ``number``/``position``.
    """
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 717 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def lowercase_(box, width: int, height: int) -> list:
    """Scale an absolute pixel box (x0, y0, x1, y1) to LayoutLM's 0-1000 grid.

    ``width``/``height`` are the image dimensions; x-coordinates are divided
    by ``width`` and y-coordinates by ``height``.

    Review fix: all three parameters were declared ``__A`` (duplicate
    argument name, a SyntaxError) while the body read
    ``box``/``width``/``height``.
    """
    return [
        int(1_0_0_0 * (box[0] / width)),
        int(1_0_0_0 * (box[1] / height)),
        int(1_0_0_0 * (box[2] / width)),
        int(1_0_0_0 * (box[3] / height)),
    ]
def lowercase_(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Run Tesseract OCR on ``image`` and return ``(words, normalized_boxes)``.

    Boxes are (left, top, right, bottom) rescaled to the 0-1000 grid via
    ``normalize_box``. Empty words (and their coordinates) are dropped.

    Review fix: all three parameters were declared ``__A`` (duplicate
    argument name, a SyntaxError) and every intermediate was bound to a
    variable named ``lowercase`` while later statements read the intended
    names; the bindings are restored.
    """
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class UpperCAmelCase_(BaseImageProcessor):
    """Image processor for LayoutLM-style document models.

    Resizes, rescales and normalizes document images and (optionally) runs
    Tesseract OCR to extract words plus 0-1000-normalized bounding boxes.

    Review restoration: the original inherited from the undefined name
    ``__A`` (restored to ``BaseImageProcessor``, imported above), declared
    every parameter as ``UpperCAmelCase`` (duplicate argument names, a
    SyntaxError) and assigned all state to a local named ``lowercase`` while
    the read sites used the intended names (``self.do_resize`` etc.); the
    method names are restored from the internal ``self.resize`` /
    ``self.rescale`` / ``self.normalize`` call sites.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: "PILImageResampling" = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: "PILImageResampling" = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` to ``size`` (a dict with 'height' and 'width')."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        # Delegates to the functional `resize` imported from image_transforms.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize ``image`` with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Preprocess one or more images; optionally attach OCR words/boxes."""
        # Per-call arguments override the instance-level defaults.
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)
        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
| 8 | 0 |
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Any , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str]=100 , UpperCAmelCase : Optional[Any]=13 , UpperCAmelCase : int=30 , UpperCAmelCase : List[Any]=2 , UpperCAmelCase : List[str]=3 , UpperCAmelCase : List[Any]=True , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Dict=32 , UpperCAmelCase : Optional[int]=4 , UpperCAmelCase : int=4 , UpperCAmelCase : Optional[int]=37 , UpperCAmelCase : Any="gelu" , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : List[str]=0.1 , UpperCAmelCase : Dict=10 , UpperCAmelCase : Optional[int]=0.0_2 , UpperCAmelCase : Any=3 , UpperCAmelCase : Tuple=None , UpperCAmelCase : str=[0, 1, 2, 3] , ) -> Optional[int]:
'''simple docstring'''
lowercase : Any =parent
lowercase : Tuple =100
lowercase : Optional[int] =batch_size
lowercase : Optional[int] =image_size
lowercase : List[Any] =patch_size
lowercase : Union[str, Any] =num_channels
lowercase : Tuple =is_training
lowercase : Union[str, Any] =use_labels
lowercase : Any =hidden_size
lowercase : Optional[int] =num_hidden_layers
lowercase : List[str] =num_attention_heads
lowercase : int =intermediate_size
lowercase : Optional[int] =hidden_act
lowercase : Dict =hidden_dropout_prob
lowercase : Optional[Any] =attention_probs_dropout_prob
lowercase : Optional[Any] =type_sequence_label_size
lowercase : str =initializer_range
lowercase : Union[str, Any] =scope
lowercase : Union[str, Any] =out_indices
lowercase : List[str] =num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowercase : Tuple =(image_size // patch_size) ** 2
lowercase : Any =num_patches + 1
def A__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
lowercase : Dict =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowercase : int =None
lowercase : Union[str, Any] =None
if self.use_labels:
lowercase : Tuple =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : str =ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
lowercase : str =self.get_config()
return config, pixel_values, labels, pixel_labels
def A__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def A__ ( self : str , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : List[str] ) -> Any:
'''simple docstring'''
lowercase : Optional[int] =BeitModel(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase : List[str] =model(snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : str ) -> Any:
'''simple docstring'''
lowercase : int =BeitForMaskedImageModeling(config=snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase : Dict =model(snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def A__ ( self : Optional[int] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : List[Any] ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[Any] =self.type_sequence_label_size
lowercase : int =BeitForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase : List[Any] =model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowercase : Optional[Any] =1
lowercase : int =BeitForImageClassification(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase : Dict =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowercase : List[Any] =model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A__ ( self : Tuple , UpperCAmelCase : Tuple , UpperCAmelCase : str , UpperCAmelCase : Tuple , UpperCAmelCase : Any ) -> Any:
'''simple docstring'''
lowercase : Dict =self.num_labels
lowercase : str =BeitForSemanticSegmentation(snake_case__ )
model.to(snake_case__ )
model.eval()
lowercase : List[str] =model(snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
lowercase : Union[str, Any] =model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
def A__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
lowercase : Union[str, Any] =self.prepare_config_and_inputs()
lowercase : str =config_and_inputs
lowercase : Optional[int] ={"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
UpperCamelCase_ = (
{
'''feature-extraction''': BeitModel,
'''image-classification''': BeitForImageClassification,
'''image-segmentation''': BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
UpperCamelCase_ = False
def A__ ( self : str ) -> int:
'''simple docstring'''
lowercase : Any =BeitModelTester(self )
lowercase : List[str] =ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=37 )
def A__ ( self : Any ) -> Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='''BEiT does not use inputs_embeds''' )
def A__ ( self : Any ) -> Union[str, Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(reason='''BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def A__ ( self : Dict ) -> Dict:
'''simple docstring'''
pass
def A__ ( self : int ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Optional[int] =model_class(snake_case__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowercase : str =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) )
def A__ ( self : Tuple ) -> Dict:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowercase : Optional[Any] =model_class(snake_case__ )
lowercase : List[Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowercase : List[str] =[*signature.parameters.keys()]
lowercase : Optional[int] =["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case__ )
def A__ ( self : str ) -> List[Any]:
'''simple docstring'''
lowercase : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
def A__ ( self : Dict ) -> int:
'''simple docstring'''
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case__ )
def A__ ( self : Tuple ) -> Dict:
'''simple docstring'''
lowercase : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
def A__ ( self : str ) -> List[str]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ )
def A__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
if not self.model_tester.is_training:
return
lowercase : Any =self.model_tester.prepare_config_and_inputs_for_common()
lowercase : Tuple =True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]:
continue
lowercase : str =model_class(snake_case__ )
model.to(snake_case__ )
model.train()
lowercase : Tuple =self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
lowercase : Union[str, Any] =model(**snake_case__ ).loss
loss.backward()
def A__ ( self : int ) -> str:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
lowercase : Optional[Any] =False
lowercase : Any =True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
lowercase : int =model_class(snake_case__ )
model.gradient_checkpointing_enable()
model.to(snake_case__ )
model.train()
lowercase : Union[str, Any] =self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
lowercase : Tuple =model(**snake_case__ ).loss
loss.backward()
def A__ ( self : int ) -> str:
'''simple docstring'''
lowercase : int =self.model_tester.prepare_config_and_inputs_for_common()
lowercase : Tuple =_config_zero_init(snake_case__ )
for model_class in self.all_model_classes:
lowercase : List[Any] =model_class(config=snake_case__ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , )
@slow
def A__ ( self : Tuple ) -> Union[str, Any]:
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Tuple =BeitModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def lowercase_ ( ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Dict =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def A__ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
return BeitImageProcessor.from_pretrained('''microsoft/beit-base-patch16-224''' ) if is_vision_available() else None
@slow
def A__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : List[str] =BeitForMaskedImageModeling.from_pretrained('''microsoft/beit-base-patch16-224-pt22k''' ).to(snake_case__ )
lowercase : Optional[Any] =self.default_image_processor
lowercase : Dict =prepare_img()
lowercase : Optional[Any] =image_processor(images=snake_case__ , return_tensors='''pt''' ).pixel_values.to(snake_case__ )
# prepare bool_masked_pos
lowercase : str =torch.ones((1, 196) , dtype=torch.bool ).to(snake_case__ )
# forward pass
with torch.no_grad():
lowercase : int =model(pixel_values=snake_case__ , bool_masked_pos=snake_case__ )
lowercase : Any =outputs.logits
# verify the logits
lowercase : str =torch.Size((1, 196, 8192) )
self.assertEqual(logits.shape , snake_case__ )
lowercase : List[Any] =torch.tensor(
[[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , snake_case__ , atol=1e-2 ) )
@slow
def A__ ( self : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[Any] =BeitForImageClassification.from_pretrained('''microsoft/beit-base-patch16-224''' ).to(snake_case__ )
lowercase : Tuple =self.default_image_processor
lowercase : int =prepare_img()
lowercase : Tuple =image_processor(images=snake_case__ , return_tensors='''pt''' ).to(snake_case__ )
# forward pass
with torch.no_grad():
lowercase : Union[str, Any] =model(**snake_case__ )
lowercase : Any =outputs.logits
# verify the logits
lowercase : Union[str, Any] =torch.Size((1, 1000) )
self.assertEqual(logits.shape , snake_case__ )
lowercase : int =torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) )
lowercase : str =281
self.assertEqual(logits.argmax(-1 ).item() , snake_case__ )
@slow
def A__ ( self : Dict ) -> Dict:
'''simple docstring'''
lowercase : List[Any] =BeitForImageClassification.from_pretrained('''microsoft/beit-large-patch16-224-pt22k-ft22k''' ).to(
snake_case__ )
lowercase : List[str] =self.default_image_processor
lowercase : str =prepare_img()
lowercase : Optional[int] =image_processor(images=snake_case__ , return_tensors='''pt''' ).to(snake_case__ )
# forward pass
with torch.no_grad():
lowercase : int =model(**snake_case__ )
lowercase : Any =outputs.logits
# verify the logits
lowercase : List[str] =torch.Size((1, 2_1841) )
self.assertEqual(logits.shape , snake_case__ )
lowercase : Tuple =torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(snake_case__ )
self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) )
lowercase : Tuple =2396
self.assertEqual(logits.argmax(-1 ).item() , snake_case__ )
@slow
def A__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
lowercase : Any =BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
lowercase : Optional[int] =model.to(snake_case__ )
lowercase : Tuple =BeitImageProcessor(do_resize=snake_case__ , size=640 , do_center_crop=snake_case__ )
lowercase : str =load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
lowercase : List[Any] =Image.open(ds[0]['''file'''] )
lowercase : int =image_processor(images=snake_case__ , return_tensors='''pt''' ).to(snake_case__ )
# forward pass
with torch.no_grad():
lowercase : Tuple =model(**snake_case__ )
lowercase : List[str] =outputs.logits
# verify the logits
lowercase : Dict =torch.Size((1, 150, 160, 160) )
self.assertEqual(logits.shape , snake_case__ )
lowercase : Tuple =version.parse(PIL.__version__ ) < version.parse('''9.0.0''' )
if is_pillow_less_than_a:
lowercase : List[Any] =torch.tensor(
[
[[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]],
[[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]],
[[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]],
] , device=snake_case__ , )
else:
lowercase : Optional[Any] =torch.tensor(
[
[[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]],
[[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]],
[[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]],
] , device=snake_case__ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1e-4 ) )
@slow
def A__ ( self : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
lowercase : Dict =BeitForSemanticSegmentation.from_pretrained('''microsoft/beit-base-finetuned-ade-640-640''' )
lowercase : int =model.to(snake_case__ )
lowercase : Union[str, Any] =BeitImageProcessor(do_resize=snake_case__ , size=640 , do_center_crop=snake_case__ )
lowercase : int =load_dataset('''hf-internal-testing/fixtures_ade20k''' , split='''test''' )
lowercase : str =Image.open(ds[0]['''file'''] )
lowercase : Optional[int] =image_processor(images=snake_case__ , return_tensors='''pt''' ).to(snake_case__ )
# forward pass
with torch.no_grad():
lowercase : Optional[Any] =model(**snake_case__ )
lowercase : Optional[int] =outputs.logits.detach().cpu()
lowercase : List[str] =image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(500, 300)] )
lowercase : List[str] =torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape , snake_case__ )
lowercase : str =image_processor.post_process_semantic_segmentation(outputs=snake_case__ )
lowercase : str =torch.Size((160, 160) )
self.assertEqual(segmentation[0].shape , snake_case__ )
| 718 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class TFDistilBertModelTester:
    """Builds a tiny DistilBert config plus random inputs and checks every TF head.

    NOTE(review): restored from a name-mangled original in which the class and
    all methods had placeholder names and ``__init__`` stored every value in a
    throwaway local instead of on ``self``. The names below are taken from the
    call sites in the test class later in this file; the values are taken from
    the original assignment sequence.
    """

    def __init__(self, parent):
        # `parent` is the unittest.TestCase driving the checks (provides assertEqual).
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.0_2
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Base model: dict and list input forms must both produce the hidden-state shape."""
        model = TFDistilBertModel(config=config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_distilbert_for_masked_lm(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """MLM head: logits over the full vocabulary per position."""
        model = TFDistilBertForMaskedLM(config=config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_distilbert_for_question_answering(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """QA head: per-position start/end logits."""
        model = TFDistilBertForQuestionAnswering(config=config)
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_distilbert_for_sequence_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Sequence-classification head: one logit vector per example."""
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_distilbert_for_multiple_choice(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Multiple-choice head: inputs are tiled across the choice dimension."""
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        inputs = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_distilbert_for_token_classification(
        self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Token-classification head: per-position label logits."""
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config)
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        """Shape the fixture for ModelTesterMixin's shared tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class TFDistilBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-suite tests for the TF DistilBert model family.

    NOTE(review): restored from a mangled original whose base classes were the
    undefined placeholder ``__A`` and whose test methods were all named ``A__``
    (so unittest would never discover them). Attribute names follow the
    ``TFModelTesterMixin``/``PipelineTesterMixin`` contract implied by the
    imports at the top of this file.
    """

    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': TFDistilBertModel,
            '''fill-mask''': TFDistilBertForMaskedLM,
            '''question-answering''': TFDistilBertForQuestionAnswering,
            '''text-classification''': TFDistilBertForSequenceClassification,
            '''token-classification''': TFDistilBertForTokenClassification,
            '''zero-shot''': TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        # Store on self: the test methods below and the mixins read these attributes.
        self.model_tester = TFDistilBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DistilBertConfig, dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_distilbert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        # Only check the first checkpoint to keep the slow test bounded.
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]):
            model = TFDistilBertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDistilBertModelIntegrationTest(unittest.TestCase):
    """Slow integration test against the released distilbert-base-uncased weights.

    NOTE(review): the mangled original bound every value to a local named
    ``lowercase`` while the subsequent lines read ``model``/``output``/
    ``UpperCAmelCase`` — i.e. it could never run. Restored the intended
    dataflow; expected values are unchanged from the original constants.
    """

    @slow
    def test_inference_masked_lm(self):
        model = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # Hidden-state shape: (batch=1, seq_len=6, dim=768).
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
                    [0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
                    [0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
| 8 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    """Builds a tiny ViT config plus random pixel inputs and checks each head.

    NOTE(review): restored from a name-mangled original in which every
    parameter was named ``UpperCAmelCase`` (duplicate argument names — a
    SyntaxError), ``__init__`` stored values in a throwaway local, and method
    bodies referenced the undefined placeholder ``SCREAMING_SNAKE_CASE_``.
    Parameter names are recovered from the assignment order and the ``self.*``
    reads in the methods below.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.0_2,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.num_patches = num_patches
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels); labels is None unless use_labels."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Shape the fixture for ModelTesterMixin's shared tests."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values, labels) = config_and_inputs
        inputs_dict = {'''pixel_values''': pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for ViT.

    Fixes from the obfuscated source: the base classes were the undefined
    placeholder ``__lowerCamelCase`` (ModelTesterMixin / PipelineTesterMixin
    per the standard transformers test layout — TODO confirm against this
    file's unseen imports); the class attributes were all bound to the single
    name ``UpperCamelCase_``; every method was named ``A__`` so unittest never
    collected the tests; ``SCREAMING_SNAKE_CASE_`` placeholders are replaced
    with the names the surrounding code requires.
    """

    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    # Attribute names per the ModelTesterMixin convention — TODO confirm order/values
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the standard COCO fixture image used by the integration tests.

    Renamed from the obfuscated ``lowercase_``: the integration tests call it
    as ``prepare_img()``.
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class UpperCAmelCase_(unittest.TestCase):
    """Slow integration tests for pretrained ViT checkpoints.

    Fixes from the obfuscated source: every method was named ``A__`` (later
    definitions shadowed earlier ones and ``self.default_image_processor``
    could never resolve), and the undefined ``SCREAMING_SNAKE_CASE_``
    placeholder stood in for ``torch_device``, locals, and keyword values.
    ``torch_device`` is presumed to come from this file's unseen
    testing_utils import — TODO confirm.
    """

    @cached_property
    def default_image_processor(self):
        # None when vision dependencies are not installed.
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)
        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass
        with torch.no_grad():
            # interpolate_pos_encoding=True: the 480px input yields 3600 patches + CLS
            outputs = model(pixel_values, interpolate_pos_encoding=True)
        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)
        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        # torch.floataa was the obfuscated spelling of torch.float16
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)
        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            outputs = model(pixel_values)
| 719 |
"""Lazy import wiring for the RemBERT model.

Fixes from the obfuscated source: every sub-list was rebound to the single
name ``SCREAMING_SNAKE_CASE``, ``_LazyModule`` was handed the undefined name
``_import_structure`` (NameError at import time), and the constructed lazy
module was assigned to a throwaway global instead of being installed in
``sys.modules``.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}

# Optional components are only advertised when their backend is installed.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_rembert"] = [
        "REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "RemBertForCausalLM",
        "RemBertForMaskedLM",
        "RemBertForMultipleChoice",
        "RemBertForQuestionAnswering",
        "RemBertForSequenceClassification",
        "RemBertForTokenClassification",
        "RemBertLayer",
        "RemBertModel",
        "RemBertPreTrainedModel",
        "load_tf_weights_in_rembert",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_rembert"] = [
        "TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFRemBertForCausalLM",
        "TFRemBertForMaskedLM",
        "TFRemBertForMultipleChoice",
        "TFRemBertForQuestionAnswering",
        "TFRemBertForSequenceClassification",
        "TFRemBertForTokenClassification",
        "TFRemBertLayer",
        "TFRemBertModel",
        "TFRemBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports so static analysis sees the real symbols.
    from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert import RemBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert_fast import RemBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rembert import (
            REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RemBertForCausalLM,
            RemBertForMaskedLM,
            RemBertForMultipleChoice,
            RemBertForQuestionAnswering,
            RemBertForSequenceClassification,
            RemBertForTokenClassification,
            RemBertLayer,
            RemBertModel,
            RemBertPreTrainedModel,
            load_tf_weights_in_rembert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rembert import (
            TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForMultipleChoice,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertLayer,
            TFRemBertModel,
            TFRemBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy; attributes are imported on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 8 | 0 |
"""Convert ASCII lowercase letters in a string to uppercase."""


def lowercase_(word: str) -> str:
    """Return *word* with each ASCII letter a-z replaced by its uppercase form.

    Fixes the original ``ord(__A)`` bug: ``ord`` was applied to the whole word
    (a TypeError for any input longer than one character) instead of to the
    current character.

    >>> lowercase_("hello")
    'HELLO'
    >>> lowercase_("Hello, World_123!")
    'HELLO, WORLD_123!'
    """
    # ASCII uppercase letters sit exactly 32 code points below the lowercase ones.
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 720 |
"""Version-comparison helpers (mirrors ``accelerate.utils.versions``).

Fixes from the obfuscated source: both functions declared several parameters
with the identical name (a SyntaxError), both functions were named
``lowercase_`` (the second shadowed the first), and the second function called
the undefined name ``compare_versions``. The canonical names are restored and
``lowercase_`` is kept as an alias for the module's final public binding.
"""
import importlib.metadata
from typing import Union

from packaging.version import Version, parse

from .constants import STR_OPERATION_TO_FUNC

torch_version = parse(importlib.metadata.version("torch"))


def compare_versions(library_or_version: Union[str, Version], operation: str, requirement_version: str):
    """Compare a library's version (or a parsed Version) against a requirement.

    Args:
        library_or_version: installed package name, or an already-parsed Version.
        operation: one of the keys of STR_OPERATION_TO_FUNC (e.g. ">=", "<").
        requirement_version: version string to compare against.

    Raises:
        ValueError: if *operation* is not a supported comparator.
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}')
    operation_func = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return operation_func(library_or_version, parse(requirement_version))


def is_torch_version(operation: str, version: str):
    """Compare the installed torch version against *version* using *operation*."""
    return compare_versions(torch_version, operation, version)


# Backwards-compatible alias: in the obfuscated module the final `lowercase_`
# binding was the torch-version helper.
lowercase_ = is_torch_version
| 8 | 0 |
"""Lazy import wiring for the LongT5 model.

Fixes from the obfuscated source: sub-lists were rebound to the single name
``SCREAMING_SNAKE_CASE`` while ``_LazyModule`` received the undefined
``_import_structure``; the lazy module was not installed in ``sys.modules``;
and the eager (TYPE_CHECKING) branch imported ``LongTa*`` names from
``*_longta`` modules although the lazy table advertises the ``LongT5`` /
``*_longt5`` spellings. The table's spellings are what lazy attribute lookup
resolves against, so they are used on both paths — TODO confirm the actual
submodule filenames.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available

_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy; attributes are imported on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 721 |
"""Scrape (job title, company) pairs for mobile-app-development roles on Indeed.

Fixes from the obfuscated source: ``from bsa import BeautifulSoup`` (the
package is ``bs4``), the base URL was bound to ``SCREAMING_SNAKE_CASE`` while
the function reads ``url``, and the function was named ``lowercase_`` while
the ``__main__`` block calls ``fetch_jobs``.
"""
from __future__ import annotations

from collections.abc import Generator

import requests
from bs4 import BeautifulSoup

url = "https://www.indeed.co.in/jobs?q=mobile+app+development&l="


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    """Yield (job_title, company_name) for each organic job card at *location*."""
    soup = BeautifulSoup(requests.get(url + location).content, "html.parser")
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs("Bangalore"), 1):
        print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 8 | 0 |
"""Minimal DreamBooth inference script: load a fine-tuned Stable Diffusion
checkpoint and render a single image.

Fixes from the obfuscated source: every binding reused the single name
``SCREAMING_SNAKE_CASE``, so ``pipe``, ``prompt`` and ``image`` were undefined
where used; ``torch.floataa`` is the obfuscated spelling of ``torch.float16``.
"""
import torch
from diffusers import StableDiffusionPipeline

model_id = "path-to-your-trained-model"
# fp16 weights on GPU keep memory low for inference
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to("cuda")

prompt = "A photo of sks dog in a bucket"
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]

image.save("dog-bucket.png")
| 700 |
"""Lazy import wiring for the M2M100 model.

Fixes from the obfuscated source: sub-lists were rebound to the single name
``SCREAMING_SNAKE_CASE`` while ``_LazyModule`` received the undefined
``_import_structure``; the lazy module was not installed in ``sys.modules``;
and the eager branch imported ``MaMaaa*`` names from ``*_mam_aaa`` modules
although the lazy table advertises ``M2M100*`` / ``*_m2m_100``. The table's
spellings are used on both paths — TODO confirm the actual submodule
filenames.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

_import_structure = {
    "configuration_m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config", "M2M100OnnxConfig"],
    "tokenization_m2m_100": ["M2M100Tokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_m2m_100"] = [
        "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST",
        "M2M100ForConditionalGeneration",
        "M2M100Model",
        "M2M100PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy; attributes are imported on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 8 | 0 |
'''simple docstring'''
import ast
import os
import re
import shutil
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.test_utils.examples import compare_against_test
from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow
from accelerate.utils import write_basic_config
# DataLoaders built from `test_samples/MRPC` for quick testing
# Should mock `{script_name}.get_dataloaders` via:
# @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders)
# Scripts under examples/by_feature that the diff tests must skip.
# The obfuscated file bound this list to SCREAMING_SNAKE_CASE while the test
# body reads EXCLUDE_EXAMPLES (NameError); define the real name and keep the
# old one as an alias.
EXCLUDE_EXAMPLES = [
    "cross_validation.py",
    "gradient_accumulation.py",
    "local_sgd.py",
    "multi_process_metrics.py",
    "memory.py",
    "automatic_gradient_accumulation.py",
    "fsdp_with_peak_mem_tracking.py",
    "deepspeed_with_config_support.py",
    "megatron_lm_gpt_pretraining.py",
]
SCREAMING_SNAKE_CASE = EXCLUDE_EXAMPLES
class UpperCAmelCase_(unittest.TestCase):
    """Checks that each `complete_*` example script contains, line for line,
    everything demonstrated by the scripts under `examples/by_feature`.

    Fixes from the obfuscated source: ``one_complete_example`` declared four
    parameters all named ``UpperCAmelCase`` (a SyntaxError), the bodies
    referenced the undefined ``__lowerCamelCase``, and both test methods were
    named ``A__`` so unittest never discovered them.
    """

    def one_complete_example(self, complete_file_name, parser_only, secondary_filename=None, special_strings=None):
        """Diff *complete_file_name* against each by_feature script; the diff must be empty."""
        self.maxDiff = None  # restored per upstream accelerate; obfuscation bound None to a throwaway local
        by_feature_path = os.path.abspath(os.path.join("examples", "by_feature"))
        examples_path = os.path.abspath("examples")
        for item in os.listdir(by_feature_path):
            if item not in EXCLUDE_EXAMPLES:
                item_path = os.path.join(by_feature_path, item)
                if os.path.isfile(item_path) and ".py" in item_path:
                    with self.subTest(
                        tested_script=complete_file_name,
                        feature_script=item,
                        tested_section="main()" if parser_only else "training_function()",
                    ):
                        diff = compare_against_test(
                            os.path.join(examples_path, complete_file_name), item_path, parser_only, secondary_filename
                        )
                        diff = "\n".join(diff)
                        if special_strings is not None:
                            for string in special_strings:
                                diff = diff.replace(string, "")
                        self.assertEqual(diff, "")

    def test_nlp_examples(self):
        self.one_complete_example("complete_nlp_example.py", True)
        self.one_complete_example("complete_nlp_example.py", False)

    def test_cv_examples(self):
        cv_path = os.path.abspath(os.path.join("examples", "cv_example.py"))
        # Lines unique to the complete CV example that must be ignored in the diff.
        special_strings = [
            " " * 16 + "{\n\n",
            " " * 20 + '"accuracy": eval_metric["accuracy"],\n\n',
            " " * 20 + '"f1": eval_metric["f1"],\n\n',
            " " * 20 + '"train_loss": total_loss.item() / len(train_dataloader),\n\n',
            " " * 20 + '"epoch": epoch,\n\n',
            " " * 16 + "},\n\n",
            " " * 16 + "step=epoch,\n",
            " " * 12,
            " " * 8 + "for step, batch in enumerate(active_dataloader):\n",
        ]
        self.one_complete_example("complete_cv_example.py", True, cv_path, special_strings)
        self.one_complete_example("complete_cv_example.py", False, cv_path, special_strings)
@mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "1"})
class UpperCAmelCase_(TempDirTestCase):
    """Runs each `examples/by_feature` script end-to-end via `accelerate launch`.

    Fixes from the obfuscated source: the base class was the undefined
    ``lowerCamelCase__`` (TempDirTestCase is what the file imports for this
    purpose); ``setUpClass`` / ``tearDownClass`` were renamed to ``A__`` and so
    were never invoked by unittest; every test method shared the name ``A__``;
    several calls referenced the undefined ``__lowerCamelCase``.
    """

    clean_on_exit = False  # NOTE(review): attribute name expected by TempDirTestCase — confirm

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls._tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls._tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        shutil.rmtree(cls._tmpdir)

    def test_checkpointing_by_epoch(self):
        testargs = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps epoch\n --output_dir {self.tmpdir}\n '.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "epoch_0")))

    def test_checkpointing_by_steps(self):
        testargs = f'\n examples/by_feature/checkpointing.py\n --checkpointing_steps 1\n --output_dir {self.tmpdir}\n '.split()
        run_command(self._launch_args + testargs)
        self.assertTrue(os.path.exists(os.path.join(self.tmpdir, "step_2")))

    def test_load_states_by_epoch(self):
        testargs = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, "epoch_0")}\n '.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        self.assertNotIn("epoch 0:", output)
        self.assertIn("epoch 1:", output)

    def test_load_states_by_steps(self):
        testargs = f'\n examples/by_feature/checkpointing.py\n --resume_from_checkpoint {os.path.join(self.tmpdir, "step_2")}\n '.split()
        output = run_command(self._launch_args + testargs, return_stdout=True)
        if torch.cuda.is_available():
            num_processes = torch.cuda.device_count()
        else:
            num_processes = 1
        if num_processes > 1:
            # each process reports fewer epochs when training is sharded
            self.assertNotIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)
        else:
            self.assertIn("epoch 0:", output)
            self.assertIn("epoch 1:", output)

    @slow
    def test_cross_validation(self):
        testargs = '''
 examples/by_feature/cross_validation.py
 --num_folds 2
 '''.split()
        with mock.patch.dict(os.environ, {"TESTING_MOCKED_DATALOADERS": "0"}):
            output = run_command(self._launch_args + testargs, return_stdout=True)
            results = re.findall("({.+})", output)
            results = [r for r in results if "accuracy" in r][-1]
            results = ast.literal_eval(results)
            self.assertGreaterEqual(results["accuracy"], 0.75)

    def test_multi_process_metrics(self):
        testargs = ["examples/by_feature/multi_process_metrics.py"]
        run_command(self._launch_args + testargs)

    @require_trackers
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_tracking(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            testargs = f'\n examples/by_feature/tracking.py\n --with_tracking\n --project_dir {tmpdir}\n '.split()
            run_command(self._launch_args + testargs)
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "tracking")))

    def test_gradient_accumulation(self):
        testargs = ["examples/by_feature/gradient_accumulation.py"]
        run_command(self._launch_args + testargs)

    def test_local_sgd(self):
        testargs = ["examples/by_feature/local_sgd.py"]
        run_command(self._launch_args + testargs)
| 701 |
"""Lazy import wiring for the MEGA model.

Fixes from the obfuscated source: the import table and model list were both
bound to ``SCREAMING_SNAKE_CASE`` while ``_LazyModule`` received the undefined
``_import_structure`` (NameError at import time), and the lazy module was
assigned to a throwaway global instead of being installed in ``sys.modules``.
"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

_import_structure = {
    "configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mega"] = [
        "MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MegaForCausalLM",
        "MegaForMaskedLM",
        "MegaForMultipleChoice",
        "MegaForQuestionAnswering",
        "MegaForSequenceClassification",
        "MegaForTokenClassification",
        "MegaModel",
        "MegaPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy; attributes are imported on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 8 | 0 |
"""Project Euler 97: last *n* digits of the non-Mersenne prime 28433 * 2**7830457 + 1."""


def lowercase_(n: int = 10) -> str:
    """Return the last *n* digits of 28433 * 2**7830457 + 1 as a string.

    Fixes from the obfuscated source: the body referenced the undefined name
    ``UpperCamelCase__`` instead of the parameter.

    Raises:
        ValueError: if *n* is not a non-negative int.
    """
    if not isinstance(n, int) or n < 0:
        raise ValueError('''Invalid input''')
    modulus = 10**n
    # modular exponentiation keeps the intermediate value to n digits
    number = 28433 * (pow(2, 7830457, modulus)) + 1
    return str(number % modulus)


# The __main__ block below refers to `solution`; the obfuscation had renamed
# the function away from that name.
solution = lowercase_

if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(f"""{solution(10) = }""")
| 702 |
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
# Module-level tokenizer resources. The obfuscated file bound each of these to
# the single name SCREAMING_SNAKE_CASE, so the tokenizer class (which reads
# VOCAB_FILES_NAMES etc.) raised NameError; bind the real names.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}
class UpperCAmelCase_(PreTrainedTokenizerFast):
    """Fast (Rust-backed) tokenizer for HerBERT.

    Fixes from the obfuscated source: the base class was the undefined ``__A``
    (``PreTrainedTokenizerFast`` is what this file imports for the purpose),
    the class attributes were all bound to the placeholder ``UpperCamelCase_``,
    the overrides were all renamed to ``A__`` (so the framework never called
    them), and the signatures declared duplicate ``UpperCAmelCase`` parameters
    (SyntaxError).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """Return ``<s> A </s>`` or ``<s> A </s> B </s>``."""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_a_a is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_a_a + sep

    def get_special_tokens_mask(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_a, already_has_special_tokens=True
            )
        if token_ids_a_a is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_a_a)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None
    ) -> List[int]:
        """Return segment ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep) * [0] + len(token_ids_a_a + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Persist the underlying tokenizers model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 8 | 0 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def lowercase_ ( __A : Tuple ) -> Union[str, Any]:
"""simple docstring"""
if (
(cp >= 0X4_e00 and cp <= 0X9_fff)
or (cp >= 0X3_400 and cp <= 0X4_dbf) #
or (cp >= 0X20_000 and cp <= 0X2a_6df) #
or (cp >= 0X2a_700 and cp <= 0X2b_73f) #
or (cp >= 0X2b_740 and cp <= 0X2b_81f) #
or (cp >= 0X2b_820 and cp <= 0X2c_eaf) #
or (cp >= 0Xf_900 and cp <= 0Xf_aff)
or (cp >= 0X2f_800 and cp <= 0X2f_a1f) #
): #
return True
return False
def is_chinese(word: str) -> int:
    """Return 1 if every character of *word* is a CJK ideograph, else 0.

    Renamed from the obfuscated ``lowercase_``: callers in this file use
    ``is_chinese``. (Returns ints rather than bools to match the original.)
    """
    for char in word:
        char_code = ord(char)
        if not _is_chinese_char(char_code):
            return 0
    return 1
def get_chinese_word(tokens):
    """Return the unique multi-character all-CJK tokens from *tokens* as a list.

    Renamed from the obfuscated ``lowercase_``: ``prepare_ref`` calls it as
    ``get_chinese_word``.
    """
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens, chinese_word_set):
    """Prefix '##' onto BERT subword pieces that continue a whole Chinese word.

    Fixes from the obfuscated source: the tuple unpack ``start, end = 0,
    len(...)`` was collapsed into a single assignment (leaving ``start``
    undefined), the in-place list write ``bert_word[j] = "##" + ...`` was
    turned into a throwaway local, and the function was renamed away from the
    name its caller uses.
    """
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            # try the longest candidate word first
            l = min(end - start, max_word_len)
            for i in range(l, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref(lines, ltp_tokenizer, bert_tokenizer):
    """Compute, per line, the positions of BERT subwords that continue a Chinese word.

    Fixes from the obfuscated source: every variable and argument was the
    placeholder ``__SCREAMING_SNAKE_CASE`` and the function name did not match
    the ``prepare_ref`` call in ``main``.
    """
    ltp_res = []
    # batch LTP segmentation 100 lines at a time
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res['''input_ids'''])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    """Read the corpus, compute Chinese whole-word reference ids, write them as JSON lines.

    Renamed from the obfuscated ``lowercase_``: the ``__main__`` block calls
    ``main(args)``.
    """
    with open(args.file_name, '''r''', encoding='''utf-8''') as f:
        data = f.readlines()
    # avoid delimiter like '\u2029'
    lines = [line.strip() for line in data if len(line) > 0 and not line.isspace()]
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(lines, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, '''w''', encoding='''utf-8''') as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    # The obfuscated source bound the parser to SCREAMING_SNAKE_CASE and then
    # called `parser.add_argument` / `main(args)` on undefined names.
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
    parser.add_argument(
        '--file_name',
        type=str,
        default='./resources/chinese-demo.txt',
        help='file need process, same as training data in lm',
    )
    parser.add_argument(
        '--ltp', type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path'
    )
    parser.add_argument('--bert', type=str, default='./resources/robert', help='resources for Bert tokenizer')
    parser.add_argument('--save_path', type=str, default='./resources/ref.txt', help='path to save res')
    args = parser.parse_args()
    main(args)
| 703 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class UpperCAmelCase_ ( TestCasePlus ):
    """Offline-mode tests: cached models must load with no network access.

    Each test launches a child Python process (so TRANSFORMERS_OFFLINE and the
    socket monkey-patch take effect from interpreter start-up) and asserts on
    its exit code and captured output.

    NOTE(review): the base class was the undefined name `__A`; restored to
    `TestCasePlus`, which is imported at the top of this file. The `A__`
    method names were never discovered by unittest; restored to `test_*`.
    """

    @require_torch
    def test_offline_mode(self):
        """With TRANSFORMERS_OFFLINE=1, previously cached files load even with sockets disabled."""
        # Code fragments run in the child interpreter via `python -c`.
        load = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
        run = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
        mock = '''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]
        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        """Without TRANSFORMERS_OFFLINE, cached files are still used when the network is flaky."""
        load = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
        run = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
        mock = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        """Sharded checkpoints must also load from the cache in offline mode."""
        load = '''
from transformers import BertConfig, BertModel, BertTokenizer
'''
        run = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
        mock = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
        # next emulate no network
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        """`pipeline` cannot infer the task in offline mode and must fail with a clear error."""
        load = '''
from transformers import pipeline
'''
        run = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
        mock = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
        env = self.get_env()
        env['TRANSFORMERS_OFFLINE'] = '1'
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]
        # check=False: a non-zero exit code is the expected outcome here.
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            'You cannot infer task automatically within `pipeline` when using offline mode',
            result.stderr.decode().replace('\n', ''),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        """Models loaded with trust_remote_code must work offline once cached."""
        load = '''
from transformers import AutoModel
'''
        run = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
| 8 | 0 |
'''simple docstring'''
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase_(task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path):
    """Convert a TensorFlow TAPAS checkpoint into a PyTorch model + tokenizer.

    Args:
        task: one of "SQA", "WTQ", "WIKISQL_SUPERVISED", "TABFACT", "MLM",
            "INTERMEDIATE_PRETRAINING"; selects head and hyperparameters.
        reset_position_index_per_cell: use relative position embeddings.
        tf_checkpoint_path: path to the TF checkpoint (its directory must
            contain ``vocab.txt``).
        tapas_config_file: JSON config describing the architecture.
        pytorch_dump_path: output directory for model and tokenizer files.

    Raises:
        ValueError: if ``task`` is not one of the supported values.

    NOTE(review): the five parameters were all named ``__A`` (a SyntaxError);
    the names above are recovered from the f-strings in this body and the
    argparse options below. The ``config.<attr>`` targets were collapsed to
    ``lowercase =`` by obfuscation; attribute names are restored from the
    upstream TAPAS conversion script — confirm against it.
    """
    config = TapasConfig.from_json_file(tapas_config_file)
    # set absolute/relative position embeddings parameter
    config.reset_position_index_per_cell = reset_position_index_per_cell
    # set remaining parameters of TapasConfig as well as the model based on the task
    if task == "SQA":
        model = TapasForQuestionAnswering(config=config)
    elif task == "WTQ":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = True
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 0.664694
        config.cell_selection_preference = 0.207951
        config.huber_loss_delta = 0.121194
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = False
        config.temperature = 0.0352513
        model = TapasForQuestionAnswering(config=config)
    elif task == "WIKISQL_SUPERVISED":
        # run_task_main.py hparams
        config.num_aggregation_labels = 4
        config.use_answer_as_supervision = False
        # hparam_utils.py hparams
        config.answer_loss_cutoff = 36.4519
        config.cell_selection_preference = 0.903421
        config.huber_loss_delta = 222.088
        config.init_cell_selection_weights_to_zero = True
        config.select_one_column = True
        config.allow_empty_column_selection = True
        config.temperature = 0.763141
        model = TapasForQuestionAnswering(config=config)
    elif task == "TABFACT":
        model = TapasForSequenceClassification(config=config)
    elif task == "MLM":
        model = TapasForMaskedLM(config=config)
    elif task == "INTERMEDIATE_PRETRAINING":
        model = TapasModel(config=config)
    else:
        raise ValueError(f'Task {task} not supported.')
    print(f'Building PyTorch model from configuration: {config}')
    # Load weights from tf checkpoint
    load_tf_weights_in_tapas(model, config, tf_checkpoint_path)
    # Save pytorch-model (weights and configuration)
    print(f'Save PyTorch model to {pytorch_dump_path}')
    model.save_pretrained(pytorch_dump_path)
    # Save tokenizer files; vocab.txt lives next to the checkpoint
    # (strip the 10-char checkpoint file name from the path).
    print(f'Save tokenizer files to {pytorch_dump_path}')
    tokenizer = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt', model_max_length=512)
    tokenizer.save_pretrained(pytorch_dump_path)
    print('Used relative position embeddings:', model.config.reset_position_index_per_cell)


# Backwards-compatible alias: the __main__ block below originally called this name.
convert_tf_checkpoint_to_pytorch = lowercase_
if __name__ == "__main__":
    # The parser/args objects were previously bound to `SCREAMING_SNAKE_CASE`
    # while later lines read the undefined names `parser` and `args`, and the
    # final call targeted the missing `convert_tf_checkpoint_to_pytorch`.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
    )
    parser.add_argument(
        '--reset_position_index_per_cell',
        default=False,
        action='store_true',
        help='Whether to use relative position embeddings or not. Defaults to True.',
    )
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--tapas_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained TAPAS model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    # `lowercase_` is the conversion function defined above.
    lowercase_(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
| 704 |
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'Alexander Joslin'
import operator as op
from .stack import Stack
def lowercase_(__A: str) -> int:
    """Evaluate a fully parenthesised infix expression with Dijkstra's
    two-stack algorithm.

    Only single-digit operands are supported: each digit character is pushed
    as its own operand. ``/`` uses true division, so the result may be a
    float despite the ``int`` annotation kept for interface compatibility.

    >>> lowercase_("(5 + ((4 * 2) * (2 + 3)))")
    45

    The previous body popped values into the name ``lowercase`` and then
    applied ``operators[opr](__A, __A)`` — ``opr`` was undefined and the
    operator was applied to the equation string itself.
    """
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    # Plain lists act as the two stacks (append == push, pop == pop).
    operand_stack = []
    operator_stack = []
    for ch in __A:
        if ch.isdigit():
            # RULE 1: operands go on the operand stack
            operand_stack.append(int(ch))
        elif ch in operators:
            # RULE 2: operators go on the operator stack
            operator_stack.append(ch)
        elif ch == ")":
            # RULE 4: on ')', apply the top operator to the top two operands
            # (the second pop is the *left* operand) and push the result.
            opr = operator_stack.pop()
            right = operand_stack.pop()
            left = operand_stack.pop()
            operand_stack.append(operators[opr](left, right))
        # RULE 3: '(' and whitespace are ignored
    # RULE 5: the remaining operand is the value of the expression
    return operand_stack[-1]


# Backwards-compatible alias used by the __main__ demo below.
dijkstras_two_stack_algorithm = lowercase_
if __name__ == "__main__":
    # Was `SCREAMING_SNAKE_CASE = ...` while printing the undefined name
    # `equation` and calling the missing `dijkstras_two_stack_algorithm`.
    equation = '(5 + ((4 * 2) * (2 + 3)))'
    # answer = 45
    print(f"{equation} = {lowercase_(equation)}")
| 8 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_:
    """Builds tiny Flaubert configs/random inputs and shape-checks each model head.

    NOTE(review): the base class was the undefined name `__lowercase` (a plain
    object base is assumed); all-duplicate `UpperCAmelCase` parameter names (a
    SyntaxError) and the `lowercase =` assignments that should have been
    `self.<attr> =` are restored. Method names are recovered from the call
    sites in the test class below (`prepare_config_and_inputs`, `get_config`,
    `create_and_check_flaubert_*`, `prepare_config_and_inputs_for_common`).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_lengths=True,
        use_token_type_ids=True,
        use_labels=True,
        gelu_activation=True,
        sinusoidal_embeddings=False,
        causal=False,
        asm=False,
        n_langs=2,
        vocab_size=99,
        n_special=0,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=12,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        summary_type="last",
        use_proj=None,
        scope=None,
    ):
        # Store every knob; the create_and_check_* methods read them off self.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Return (config, ids, type-ids, lengths, labels..., mask) for the checks below."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            # small variation of seq_length
            input_lengths = ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )

    def get_config(self):
        return FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
        )

    def create_and_check_flaubert_model(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_simple_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_qa(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
            p_mask=input_mask,
        )
        result_with_labels = model(
            input_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
            cls_index=sequence_labels,
            is_impossible=is_impossible_labels,
        )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top)
        )
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))

    def create_and_check_flaubert_sequence_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_token_classif(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_multiple_choice(
        self, config, input_ids, token_type_ids, input_lengths, sequence_labels,
        token_labels, is_impossible_labels, choice_labels, input_mask,
    ):
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Repeat every input across the choice dimension.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "lengths": input_lengths,
            "attention_mask": input_mask,
        }
        return config, inputs_dict


# Backwards-compatible alias: the test class below instantiates `FlaubertModelTester`.
FlaubertModelTester = UpperCAmelCase_
@require_torch
class UpperCAmelCase_ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common model + pipeline tests for the PyTorch Flaubert heads.

    NOTE(review): the two base mixins were the undefined name `__lowercase`;
    restored to ModelTesterMixin/PipelineTesterMixin (imported above). The two
    class attributes were both named `UpperCamelCase_` (the second clobbered
    the first); the mixins read `all_model_classes` / `pipeline_model_mapping`.
    Test method names were `A__` and thus never discovered by unittest.
    """

    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': FlaubertModel,
            'fill-mask': FlaubertWithLMHeadModel,
            'question-answering': FlaubertForQuestionAnsweringSimple,
            'text-classification': FlaubertForSequenceClassification,
            'token-classification': FlaubertForTokenClassification,
            'zero-shot': FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith('Fast')
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict['start_positions'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict['end_positions'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict

    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return
            config.torchscript = True
            model = model_class(config=config)
            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict['input_ids'].to('cpu'), inputs_dict['attention_mask'].to('cpu'))
            )
            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, 'traced_model.pt'))
                loaded = torch.jit.load(os.path.join(tmp, 'traced_model.pt'), map_location=torch_device)
                loaded(inputs_dict['input_ids'].to(torch_device), inputs_dict['attention_mask'].to(torch_device))
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
    """Integration test: checks a reference output slice of the pretrained base model.

    NOTE(review): the method was named `A__` (never discovered by unittest)
    and its locals were the undefined name `UpperCAmelCase`.
    """

    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased')
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 705 |
'''simple docstring'''
import re
def lowercase_(__A: str) -> bool:
    """Return True if ``__A`` is a valid Indian mobile number.

    Accepts an optional '+91' prefix (with optional '-' or space separator),
    an optional leading '0' or '91', then a 10-digit number starting with
    7, 8 or 9.

    The previous body searched the pattern string against itself
    (`re.search(__A, __A)`) and never used the compiled pattern.
    """
    pat = re.compile(r'^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$')
    match = re.search(pat, __A)
    # `match.string` is the searched text; the equality mirrors the original check.
    return match is not None and match.string == __A


# Backwards-compatible alias used by the __main__ demo below.
indian_phone_validator = lowercase_
if __name__ == "__main__":
    # `lowercase_` is the validator defined above (the original called the
    # missing name `indian_phone_validator`).
    print(lowercase_('+918827897895'))
| 8 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class UpperCAmelCase_ ( unittest.TestCase ):
    """Builds tiny DistilBert configs and random inputs for the Flax tests below.

    NOTE(review): the `__init__` parameters were all named `UpperCAmelCase`
    (a SyntaxError) and the `self.<attr> =` targets were replaced by
    `lowercase =`; both are restored. Method names are recovered from the
    FlaxModelTesterMixin contract (`prepare_config_and_inputs_for_common`).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Random ids + optional attention mask + a tiny DistilBertConfig."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = DistilBertConfig(
            vocab_size=self.vocab_size,
            dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            hidden_dim=self.intermediate_size,
            hidden_act=self.hidden_act,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            # Was the undefined name `__UpperCamelCase`; True matches the
            # upstream flax DistilBert tester — TODO confirm.
            tie_weights_=True,
        )
        return config, input_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
        return config, inputs_dict


# Backwards-compatible alias: the test class below instantiates `FlaxDistilBertModelTester`.
FlaxDistilBertModelTester = UpperCAmelCase_
@require_flax
class UpperCAmelCase_ ( FlaxModelTesterMixin , unittest.TestCase ):
    """Common Flax model tests run against every DistilBert head.

    NOTE(review): the base class was the undefined name `__A`; restored to
    FlaxModelTesterMixin (imported above). The class attribute was named
    `UpperCamelCase_`, but the mixin reads `all_model_classes`. The original
    tuple lists FlaxDistilBertForQuestionAnswering twice; kept verbatim.
    """

    all_model_classes = (
        (
            FlaxDistilBertModel,
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxDistilBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('distilbert-base-uncased')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxDistilBertModelIntegrationTest(unittest.TestCase):
    """Integration test pinning the base model's output values."""

    @slow
    def test_inference_no_head(self):
        model = FlaxDistilBertModel.from_pretrained("distilbert-base-uncased")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])

        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = np.array(
            [[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]]
        )
        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
| 706 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class TFRoFormerModelTester:
    """Builds a tiny RoFormer config plus dummy inputs and shape-checks every TF head.

    NOTE(review): as in the upstream tester, the constructor accepts sizing
    keyword arguments but deliberately pins small hard-coded values; the
    signature is kept only for call-site compatibility.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.0_2,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.0_2
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Run the base model with both dict and list inputs and check output shape."""
        model = TFRoFormerModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_lm_head(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        """Causal LM head: logits must be (batch, seq_len, vocab_size)."""
        config.is_decoder = True
        model = TFRoFormerForCausalLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        prediction_scores = model(inputs)["logits"]
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size]
        )

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFRoFormerForMultipleChoice(config=config)
        # Replicate each input num_choices times along a new axis.
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFRoFormerForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFRoFormerForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape the common TF model tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common + pipeline test suite for the TF RoFormer models."""

    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # Text-generation pipeline tests are skipped for this model.
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    """Integration test pinning masked-LM output values for the Chinese base checkpoint."""

    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 5_0000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.1_2_0_5_3_3_4_1, -1.0_2_6_4_9_0_1, 0.2_9_2_2_1_9_4_6],
                    [-1.5_1_3_3_7_8_3, 0.1_9_7_4_3_3, 0.1_5_1_9_0_6_0_7],
                    [-5.0_1_3_5_4_0_3, -3.9_0_0_2_5_6, -0.8_4_0_3_8_7_6_4],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    """Checks the sinusoidal positional embedding layer against reference values."""

    # Used via self.tolerance below; the previous attribute name broke that lookup.
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emba(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0, 1.0_0_0_0], [0.8_4_1_5, 0.0_4_6_4, 0.0_0_2_2, 0.5_4_0_3, 0.9_9_8_9, 1.0_0_0_0]]
        )
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0, 0.0_0_0_0],
                [0.8_4_1_5, 0.8_2_1_9, 0.8_0_2_0, 0.7_8_1_9, 0.7_6_1_7],
                [0.9_0_9_3, 0.9_3_6_4, 0.9_5_8_1, 0.9_7_4_9, 0.9_8_7_0],
            ]
        )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emba([2, 16, 512])
        weights = emba.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    """Checks apply_rotary_position_embeddings against reference query/key values."""

    # Used via self.tolerance below; the previous attribute name broke that lookup.
    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self):
        # 2x12x16x64 ramp tensors; key is the negated ramp.  (`tf.floataa` was a
        # mangled `tf.float32`.)
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        desired_query_result = tf.constant(
            [
                [0.0_0_0_0, 0.0_1_0_0, 0.0_2_0_0, 0.0_3_0_0, 0.0_4_0_0, 0.0_5_0_0, 0.0_6_0_0, 0.0_7_0_0],
                [-0.2_0_1_2, 0.8_8_9_7, 0.0_2_6_3, 0.9_4_0_1, 0.2_0_7_4, 0.9_4_6_3, 0.3_4_8_1, 0.9_3_4_3],
                [-1.7_0_5_7, 0.6_2_7_1, -1.2_1_4_5, 1.3_8_9_7, -0.6_3_0_3, 1.7_6_4_7, -0.1_1_7_3, 1.8_9_8_5],
                [-2.1_7_3_1, -1.6_3_9_7, -2.7_3_5_8, 0.2_8_5_4, -2.1_8_4_0, 1.7_1_8_3, -1.3_0_1_8, 2.4_8_7_1],
                [0.2_7_1_7, -3.6_1_7_3, -2.9_2_0_6, -2.1_9_8_8, -3.6_6_3_8, 0.3_8_5_8, -2.9_1_5_5, 2.2_9_8_0],
                [3.9_8_5_9, -2.1_5_8_0, -0.7_9_8_4, -4.4_9_0_4, -4.1_1_8_1, -2.0_2_5_2, -4.4_7_8_2, 1.1_2_5_3],
            ]
        )
        desired_key_result = tf.constant(
            [
                [0.0_0_0_0, -0.0_1_0_0, -0.0_2_0_0, -0.0_3_0_0, -0.0_4_0_0, -0.0_5_0_0, -0.0_6_0_0, -0.0_7_0_0],
                [0.2_0_1_2, -0.8_8_9_7, -0.0_2_6_3, -0.9_4_0_1, -0.2_0_7_4, -0.9_4_6_3, -0.3_4_8_1, -0.9_3_4_3],
                [1.7_0_5_7, -0.6_2_7_1, 1.2_1_4_5, -1.3_8_9_7, 0.6_3_0_3, -1.7_6_4_7, 0.1_1_7_3, -1.8_9_8_5],
                [2.1_7_3_1, 1.6_3_9_7, 2.7_3_5_8, -0.2_8_5_4, 2.1_8_4_0, -1.7_1_8_3, 1.3_0_1_8, -2.4_8_7_1],
                [-0.2_7_1_7, 3.6_1_7_3, 2.9_2_0_6, 2.1_9_8_8, 3.6_6_3_8, -0.3_8_5_8, 2.9_1_5_5, -2.2_9_8_0],
                [-3.9_8_5_9, 2.1_5_8_0, 0.7_9_8_4, 4.4_9_0_4, 4.1_1_8_1, 2.0_2_5_2, 4.4_7_8_2, -1.1_2_5_3],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_result, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_result, atol=self.tolerance)
| 8 | 0 |
'''simple docstring'''
import argparse
import torch
from datasets import load_dataset
from donut import DonutModel
from transformers import (
DonutImageProcessor,
DonutProcessor,
DonutSwinConfig,
DonutSwinModel,
MBartConfig,
MBartForCausalLM,
VisionEncoderDecoderModel,
XLMRobertaTokenizerFast,
)
def get_configs(model):
    """Derive HuggingFace (DonutSwinConfig, MBartConfig) from an original DonutModel.

    Args:
        model: the original `DonutModel` whose `config` supplies the sizes.

    Returns:
        (encoder_config, decoder_config) tuple.
    """
    original_config = model.config

    encoder_config = DonutSwinConfig(
        image_size=original_config.input_size,
        patch_size=4,
        depths=original_config.encoder_layer,
        num_heads=[4, 8, 1_6, 3_2],
        window_size=original_config.window_size,
        embed_dim=1_2_8,
    )
    decoder_config = MBartConfig(
        is_decoder=True,
        is_encoder_decoder=False,
        add_cross_attention=True,
        decoder_layers=original_config.decoder_layer,
        max_position_embeddings=original_config.max_position_embeddings,
        vocab_size=len(
            model.decoder.tokenizer
        ),  # several special tokens were added to the vocab of XLMRobertaTokenizer, also for the new tokenizer
        scale_embedding=True,
        add_final_layer_norm=True,
    )

    return encoder_config, decoder_config
def rename_key(name):
    """Map an original Donut state-dict key to its HuggingFace equivalent.

    Pure string rewriting: prefix renames, Swin block internals, and the final
    encoder LayerNorm special cases.
    """
    if "encoder.model" in name:
        name = name.replace("encoder.model", "encoder")
    if "decoder.model" in name:
        name = name.replace("decoder.model", "decoder")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if name.startswith("encoder"):
        if "layers" in name:
            name = "encoder." + name
        if "attn.proj" in name:
            name = name.replace("attn.proj", "attention.output.dense")
        if "attn" in name and "mask" not in name:
            name = name.replace("attn", "attention.self")
        if "norm1" in name:
            name = name.replace("norm1", "layernorm_before")
        if "norm2" in name:
            name = name.replace("norm2", "layernorm_after")
        if "mlp.fc1" in name:
            name = name.replace("mlp.fc1", "intermediate.dense")
        if "mlp.fc2" in name:
            name = name.replace("mlp.fc2", "output.dense")

        if name == "encoder.norm.weight":
            name = "encoder.layernorm.weight"
        if name == "encoder.norm.bias":
            name = "encoder.layernorm.bias"

    return name
def convert_state_dict(orig_state_dict, model):
    """Convert an original Donut state dict in place to the HuggingFace layout.

    Fused qkv tensors are split into separate query/key/value entries sized by
    the target model's attention head size; attn_mask buffers and the final
    encoder norm are dropped; everything else is renamed via rename_key.
    """
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # Key layout: encoder.model.layers.<L>.blocks.<B>.attn.qkv.{weight,bias}
            key_split = key.split(".")
            layer_num = int(key_split[3])
            block_num = int(key_split[5])
            dim = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"
                ] = val[:dim, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"
                ] = val[dim : dim * 2, :]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"
                ] = val[-dim:, :]
            else:
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"
                ] = val[:dim]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"
                ] = val[dim : dim * 2]
                orig_state_dict[
                    f"encoder.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"
                ] = val[-dim:]
        elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]:
            # HuggingFace implementation doesn't use attn_mask buffer
            # and model doesn't use final LayerNorms for the encoder
            pass
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """Convert an original Donut checkpoint to a VisionEncoderDecoderModel.

    Loads the original model, transfers its weights, verifies patch embeddings,
    encoder hidden states and decoder logits on a sample document, then
    optionally saves and/or pushes model + processor.
    """
    # load original model
    original_model = DonutModel.from_pretrained(model_name).eval()

    # load HuggingFace model
    encoder_config, decoder_config = get_configs(original_model)
    encoder = DonutSwinModel(encoder_config)
    decoder = MBartForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()

    state_dict = original_model.state_dict()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # verify results on scanned document
    dataset = load_dataset("hf-internal-testing/example-documents")
    image = dataset["test"][0]["image"].convert("RGB")

    tokenizer = XLMRobertaTokenizerFast.from_pretrained(model_name, from_slow=True)
    image_processor = DonutImageProcessor(
        do_align_long_axis=original_model.config.align_long_axis, size=original_model.config.input_size[::-1]
    )
    processor = DonutProcessor(image_processor, tokenizer)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    if model_name == "naver-clova-ix/donut-base-finetuned-docvqa":
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        question = "When is the coffee break?"
        task_prompt = task_prompt.replace("{user_input}", question)
    elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip":
        task_prompt = "<s_rvlcdip>"
    elif model_name in [
        "naver-clova-ix/donut-base-finetuned-cord-v1",
        "naver-clova-ix/donut-base-finetuned-cord-v1-2560",
    ]:
        task_prompt = "<s_cord>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2":
        task_prompt = "s_cord-v2>"
    elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket":
        task_prompt = "<s_zhtrainticket>"
    elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]:
        # use a random prompt
        task_prompt = "hello world"
    else:
        raise ValueError("Model name not supported")
    prompt_tensors = original_model.decoder.tokenizer(task_prompt, add_special_tokens=False, return_tensors="pt")[
        "input_ids"
    ]

    # verify patch embeddings
    original_patch_embed = original_model.encoder.model.patch_embed(pixel_values)
    patch_embeddings, _ = model.encoder.embeddings(pixel_values)
    assert torch.allclose(original_patch_embed, patch_embeddings, atol=1e-3)

    # verify encoder hidden states
    original_last_hidden_state = original_model.encoder(pixel_values)
    last_hidden_state = model.encoder(pixel_values).last_hidden_state
    assert torch.allclose(original_last_hidden_state, last_hidden_state, atol=1e-2)

    # verify decoder hidden states
    # NOTE(review): third positional argument restored as None per the upstream script — confirm
    original_logits = original_model(pixel_values, prompt_tensors, None).logits
    logits = model(pixel_values, decoder_input_ids=prompt_tensors).logits
    assert torch.allclose(original_logits, logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and processor to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
        processor.push_to_hub("nielsr/" + model_name.split("/")[-1], commit_message="Update model")
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="naver-clova-ix/donut-base-finetuned-docvqa",
        required=False,
        type=str,
        help="Name of the original model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default=None,
        required=False,
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Whether or not to push the converted model and processor to the 🤗 hub.",
    )

    args = parser.parse_args()
    convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 707 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    """Combine a LayoutLMv2 image processor and a LayoutXLM tokenizer.

    The image processor handles the image (and optionally OCR); the tokenizer
    turns words + bounding boxes into model inputs.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Run the image processor, then the tokenizer, and merge their outputs."""
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        """Repeat each image so every overflowing encoding keeps its source image."""
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 8 | 0 |
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    """Scheduler-specific tests for DPMSolverSDEScheduler."""

    # Attribute names match what the methods below read (self.scheduler_classes,
    # self.num_inference_steps); previous obfuscated names shadowed each other.
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10
def A__ ( self : int , **UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[str] ={
'num_train_timesteps': 1100,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'noise_sampler_seed': 0,
}
config.update(**UpperCamelCase__ )
return config
def A__ ( self : str ) -> Tuple:
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ )
def A__ ( self : Any ) -> List[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=UpperCamelCase__ , beta_end=UpperCamelCase__ )
def A__ ( self : List[Any] ) -> Tuple:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=UpperCamelCase__ )
def A__ ( self : Any ) -> Any:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase__ )
def A__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
lowercase : List[str] =self.scheduler_classes[0]
lowercase : List[Any] =self.get_scheduler_config()
lowercase : List[str] =scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
lowercase : Union[str, Any] =self.dummy_model()
lowercase : Any =self.dummy_sample_deter * scheduler.init_noise_sigma
lowercase : Any =sample.to(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
lowercase : Any =scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
lowercase : List[str] =model(UpperCamelCase__ , UpperCamelCase__ )
lowercase : Optional[Any] =scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowercase : Optional[Any] =output.prev_sample
lowercase : Tuple =torch.sum(torch.abs(UpperCamelCase__ ) )
lowercase : Optional[Any] =torch.mean(torch.abs(UpperCamelCase__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1e-2
assert abs(result_mean.item() - 0.2_1_7_8_7_0_5_9_6_4_5_6_5_2_7_7 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1e-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_6_8_9_2_2_9_9_6_5_2 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1e-3
def A__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
lowercase : int =self.scheduler_classes[0]
lowercase : List[Any] =self.get_scheduler_config(prediction_type='''v_prediction''' )
lowercase : Optional[int] =scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps )
lowercase : Dict =self.dummy_model()
lowercase : Optional[Any] =self.dummy_sample_deter * scheduler.init_noise_sigma
lowercase : Tuple =sample.to(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
lowercase : Tuple =scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
lowercase : str =model(UpperCamelCase__ , UpperCamelCase__ )
lowercase : List[Any] =scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowercase : Optional[int] =output.prev_sample
lowercase : List[Any] =torch.sum(torch.abs(UpperCamelCase__ ) )
lowercase : Union[str, Any] =torch.mean(torch.abs(UpperCamelCase__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1e-2
assert abs(result_mean.item() - 0.1_6_2_2_6_2_8_9_0_1_4_8_1_6_2_8_4 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1e-2
assert abs(result_mean.item() - 0.1_6_6_8_8_3_2_6_0_0_1_1_6_7_2_9_7 ) < 1e-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1e-2
assert abs(result_mean.item() - 0.1_5_6_0_5_3_0_6_6_2_5_3_6_6_2_1 ) < 1e-3
def A__ ( self : int ) -> Any:
'''simple docstring'''
lowercase : Any =self.scheduler_classes[0]
lowercase : List[Any] =self.get_scheduler_config()
lowercase : int =scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCamelCase__ )
lowercase : List[Any] =self.dummy_model()
lowercase : Dict =self.dummy_sample_deter.to(UpperCamelCase__ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
lowercase : Tuple =scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
lowercase : Any =model(UpperCamelCase__ , UpperCamelCase__ )
lowercase : List[str] =scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowercase : Optional[Any] =output.prev_sample
lowercase : List[Any] =torch.sum(torch.abs(UpperCamelCase__ ) )
lowercase : Dict =torch.mean(torch.abs(UpperCamelCase__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1e-2
assert abs(result_mean.item() - 0.2_1_8_0_5_9_3_4_6_0_7_9_8_2_6_3_5 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1e-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_8_3_8_2_4_1_5_7_7_1 ) < 1e-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1e-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1e-3
def A__ ( self : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[str] =self.scheduler_classes[0]
lowercase : List[Any] =self.get_scheduler_config()
lowercase : Optional[Any] =scheduler_class(**UpperCamelCase__ , use_karras_sigmas=UpperCamelCase__ )
scheduler.set_timesteps(self.num_inference_steps , device=UpperCamelCase__ )
lowercase : Dict =self.dummy_model()
lowercase : str =self.dummy_sample_deter.to(UpperCamelCase__ ) * scheduler.init_noise_sigma
lowercase : int =sample.to(UpperCamelCase__ )
for t in scheduler.timesteps:
lowercase : Optional[int] =scheduler.scale_model_input(UpperCamelCase__ , UpperCamelCase__ )
lowercase : List[str] =model(UpperCamelCase__ , UpperCamelCase__ )
lowercase : Dict =scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowercase : Optional[Any] =output.prev_sample
lowercase : int =torch.sum(torch.abs(UpperCamelCase__ ) )
lowercase : int =torch.mean(torch.abs(UpperCamelCase__ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1e-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1e-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1e-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1e-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1e-2
| 708 |
'''simple docstring'''
def solution(n: int = 6_0_0_8_5_1_4_7_5_1_4_3) -> int:
    """Return the largest prime factor of ``n`` (Project Euler problem 3).

    Fixes: the accumulator/loop variables were mangled to undefined names and
    the function returned the original input instead of the answer; the
    ``__main__`` guard also calls ``solution()``, so the function is renamed
    to match.

    Raises:
        TypeError: if ``n`` cannot be cast to an int.
        ValueError: if ``n`` is not at least one.
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''')
    if n <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''')
    ans = 1
    i = 2
    # Trial division: divide out each prime factor completely before moving on.
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    # Whatever remains above 1 is itself prime — the largest factor.
    if n > 1:
        ans = n
    return int(ans)


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 8 | 0 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
# Fix: all four module constants were bound to the same shadowed placeholder
# name; the tokenizer class below references them as `logger`,
# `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`.
logger = logging.get_logger(__name__)

# Names of the on-disk vocabulary files expected by the tokenizer.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/vocab.json',
        'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/vocab.json',
        'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json',
        'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json',
        'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json',
        'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json',
    },
    'merges_file': {
        'facebook/bart-base': 'https://huggingface.co/facebook/bart-base/resolve/main/merges.txt',
        'facebook/bart-large': 'https://huggingface.co/facebook/bart-large/resolve/main/merges.txt',
        'facebook/bart-large-mnli': 'https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt',
        'facebook/bart-large-cnn': 'https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt',
        'facebook/bart-large-xsum': 'https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt',
        'yjernite/bart_eli5': 'https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt',
    },
}

# Maximum input lengths (positional-embedding sizes) per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/bart-base': 1_024,
    'facebook/bart-large': 1_024,
    'facebook/bart-large-mnli': 1_024,
    'facebook/bart-large-cnn': 1_024,
    'facebook/bart-large-xsum': 1_024,
    'yjernite/bart_eli5': 1_024,
}
@lru_cache()
def bytes_to_unicode():
    """Return a mapping from every byte value (0-255) to a printable unicode char.

    Printable bytes map to themselves; the rest are remapped above U+0100 so
    that BPE can operate on strings without control characters or whitespace.
    Fixes: local names were mangled to undefined placeholders, and the class
    below calls this function as ``bytes_to_unicode``, so it is renamed to match.
    """
    bs = (
        list(range(ord('''!'''), ord('''~''') + 1)) + list(range(ord('''¡'''), ord('''¬''') + 1)) + list(range(ord('''®'''), ord('''ÿ''') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            # Shift unprintable bytes into the range starting at U+0100.
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in ``word`` (a tuple of symbols).

    Fixes: locals were mangled to undefined placeholders, and the tokenizer
    class calls this helper as ``get_pairs``, so it is renamed to match.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class UpperCAmelCase_(PreTrainedTokenizer):
    """
    BART tokenizer: byte-level Byte-Pair-Encoding, the same scheme as GPT-2.

    Requires a ``vocab.json`` (token -> id) and a ``merges.txt`` (ranked BPE
    merges). Fixes applied: the base class, class attributes, ``__init__``
    parameter names (which were duplicated — a SyntaxError) and all method
    names/locals were mangled; they are restored to the canonical tokenizer
    API so the ``PreTrainedTokenizer`` machinery can dispatch to them.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding='''utf-8''') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='''utf-8''') as merges_handle:
            bpe_merges = merges_handle.read().split('''\n''')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''')

    @property
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full token -> id mapping, including added tokens."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply (and cache) byte-pair encoding to a single pre-token."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Greedily merge the lowest-ranked (earliest-learned) pair first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('''inf''')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            pairs = get_pairs(word)
        word = ''' '''.join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''')
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(''' '''))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id, falling back to the unk token."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an id (int) back to its token (str)."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of BPE tokens back into a decoded string."""
        text = ''''''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('''utf-8''', errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write ``vocab.json`` and ``merges.txt`` into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''])

        with open(vocab_file, '''w''', encoding='''utf-8''') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '''\n''')

        index = 0
        with open(merge_file, '''w''', encoding='''utf-8''') as writer:
            writer.write('''#version: 0.2\n''')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ''' Please check that the tokenizer is not corrupted!''')
                    index = token_index
                writer.write(''' '''.join(bpe_tokens) + '''\n''')
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None) -> List[int]:
        """``<s> A </s>`` for one sequence; ``<s> A </s></s> B </s>`` for a pair."""
        if token_ids_a_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a_a + sep

    def get_special_tokens_mask(self, token_ids_a, token_ids_a_a=None, already_has_special_tokens=False) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_a, already_has_special_tokens=True)
        if token_ids_a_a is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1, 1] + ([0] * len(token_ids_a_a)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_a, token_ids_a_a=None) -> List[int]:
        """BART does not use token type ids; return a list of zeros of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_a_a is None:
            return len(cls + token_ids_a + sep) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_a + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word is BPE-encoded like mid-sentence words."""
        add_prefix_space = kwargs.pop('''add_prefix_space''', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)
| 709 |
'''simple docstring'''
from __future__ import annotations
import math
def ucal(u: float, p: int) -> float:
    """Return u * (u-1) * ... * (u-p+1), the falling-factorial term used by
    Newton's forward-difference interpolation.

    Fix: the accumulator was mangled to an undefined placeholder.
    """
    temp = u
    for i in range(1, p):
        temp = temp * (u - i)
    return temp
def main() -> None:
    """Read sample points interactively and print the Newton forward-interpolated value.

    Fix: the difference-table and parameter assignments were mangled to
    undefined placeholder names; the standard algorithm is restored.
    """
    n = int(input('''enter the numbers of values: '''))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    # Pre-fill the n x n forward-difference table with zeros.
    for i in range(n):
        for j in range(n):
            y[i].append(0)
    print('''enter the values of parameters in a list: ''')
    x = list(map(int, input().split()))
    print('''enter the values of corresponding parameters: ''')
    for i in range(n):
        y[i][0] = float(input())
    value = int(input('''enter the value to interpolate: '''))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        summ += (ucal(u, i) * y[0][i]) / math.factorial(i)
    print(f'the value at {value} is {summ}')


if __name__ == "__main__":
    main()
| 8 | 0 |
from itertools import count
def solution(min_block_length: int = 5_0) -> int:
    """Return the least row length n for which the fill-count function first
    exceeds one million (Project Euler problem 115).

    Fixes: the counting list and the parameter were referenced under
    mangled/undefined names, and the ``__main__`` guard calls ``solution()``,
    so the function is renamed to match.
    """
    # fill_count_functions[k] counts the ways to fill a row of length k;
    # rows shorter than the minimum block length admit only the empty fill.
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_0_0_0_0_0_0:
            break
    return n


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 710 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_(unittest.TestCase):
    """Tests for AutoImageProcessor: local loading, error reporting, custom
    registration and trusted remote code.

    Fixes applied: every argument/local was mangled to the undefined
    placeholder ``UpperCAmelCase`` and all methods shared the name ``A__``
    (so only one survived); names and the True/False remote-code flags are
    restored from the upstream test semantics.
    """

    def setUp(self) -> None:
        # Never block waiting for user confirmation about remote code in tests.
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    def test_image_processor_from_model_shortcut(self):
        config = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''')
        self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_key(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / '''preprocessor_config.json'''
            config_tmpfile = Path(tmpdirname) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''},
                open(processor_tmpfile, '''w'''),
            )
            json.dump({'''model_type''': '''clip'''}, open(config_tmpfile, '''w'''))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_feature_extractor_key(self):
        # The legacy `feature_extractor_type` key must also resolve to an image processor.
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / '''preprocessor_config.json'''
            config_tmpfile = Path(tmpdirname) / '''config.json'''
            json.dump(
                {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''},
                open(processor_tmpfile, '''w'''),
            )
            json.dump({'''model_type''': '''clip'''}, open(config_tmpfile, '''w'''))

            config = AutoImageProcessor.from_pretrained(tmpdirname)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = CLIPConfig()

            # Create a dummy config file with image_proceesor_type
            processor_tmpfile = Path(tmpdirname) / '''preprocessor_config.json'''
            config_tmpfile = Path(tmpdirname) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''},
                open(processor_tmpfile, '''w'''),
            )
            json.dump({'''model_type''': '''clip'''}, open(config_tmpfile, '''w'''))

            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            config_dict = AutoImageProcessor.from_pretrained(tmpdirname).to_dict()
            config_dict.pop('''image_processor_type''')
            config = CLIPImageProcessor(**config_dict)

            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)

            config = AutoImageProcessor.from_pretrained(tmpdirname)

            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue('''_processor_class''' not in dict_as_saved)

            self.assertIsInstance(config, CLIPImageProcessor)

    def test_image_processor_from_local_file(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            processor_tmpfile = Path(tmpdirname) / '''preprocessor_config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''},
                open(processor_tmpfile, '''w'''),
            )

            config = AutoImageProcessor.from_pretrained(processor_tmpfile)
            self.assertIsInstance(config, CLIPImageProcessor)

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, '''clip-base is not a local folder and is not a valid model identifier'''):
            _ = AutoImageProcessor.from_pretrained('''clip-base''')

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)'''):
            _ = AutoImageProcessor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision='''aaaaaa''')

    def test_image_processor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''', ):
            _ = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''')

    def test_from_pretrained_dynamic_image_processor(self):
        # If remote code is not trusted, loading a dynamic processor fails.
        with self.assertRaises(ValueError):
            _ = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''')
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            _ = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''', trust_remote_code=False)

        image_processor = AutoImageProcessor.from_pretrained(
            '''hf-internal-testing/test_dynamic_image_processor''', trust_remote_code=True)
        self.assertEqual(image_processor.__class__.__name__, '''NewImageProcessor''')

        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(tmp_dir)
            reloaded_image_processor = AutoImageProcessor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_image_processor.__class__.__name__, '''NewImageProcessor''')

    def test_new_image_processor_registration(self):
        try:
            AutoConfig.register('''custom''', CustomConfig)
            AutoImageProcessor.register(CustomConfig, CustomImageProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoImageProcessor.register(CLIPConfig, CLIPImageProcessor)

            with tempfile.TemporaryDirectory() as tmpdirname:
                processor_tmpfile = Path(tmpdirname) / '''preprocessor_config.json'''
                config_tmpfile = Path(tmpdirname) / '''config.json'''
                json.dump(
                    {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''},
                    open(processor_tmpfile, '''w'''),
                )
                json.dump({'''model_type''': '''clip'''}, open(config_tmpfile, '''w'''))

                image_processor = CustomImageProcessor.from_pretrained(tmpdirname)

            # Now that the config is registered, it can be used as any other config with the auto-API
            with tempfile.TemporaryDirectory() as tmp_dir:
                image_processor.save_pretrained(tmp_dir)
                new_image_processor = AutoImageProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_image_processor, CustomImageProcessor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_image_processor_conflict(self):
        class NewImageProcessor(CLIPImageProcessor):
            # Marker attribute so the test can tell the local class from the hub one.
            is_local = True

        try:
            AutoConfig.register('''custom''', CustomConfig)
            AutoImageProcessor.register(CustomConfig, NewImageProcessor)
            # If remote code is not set, the default is to use local
            image_processor = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''')
            self.assertEqual(image_processor.__class__.__name__, '''NewImageProcessor''')
            self.assertTrue(image_processor.is_local)
            # If remote code is disabled, we load the local one.
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''', trust_remote_code=False)
            self.assertEqual(image_processor.__class__.__name__, '''NewImageProcessor''')
            self.assertTrue(image_processor.is_local)
            # If remote is enabled, we load from the Hub
            image_processor = AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''', trust_remote_code=True)
            self.assertEqual(image_processor.__class__.__name__, '''NewImageProcessor''')
            self.assertTrue(not hasattr(image_processor, '''is_local'''))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 8 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
# NOTE(review): every assignment below rebinds the SAME name
# SCREAMING_SNAKE_CASE, so only the final value (the checkpoint list)
# survives at import time. The comments suggest these were distinct
# constants (logger, config docstring name, base/classification checkpoint
# names, expected output shape, archive list) — confirm the intended names
# against later code in this file before renaming.
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# General docstring
SCREAMING_SNAKE_CASE = "RegNetConfig"
# Base docstring
SCREAMING_SNAKE_CASE = "facebook/regnet-y-040"
SCREAMING_SNAKE_CASE = [1, 1_088, 7, 7]
# Image classification docstring
SCREAMING_SNAKE_CASE = "facebook/regnet-y-040"
SCREAMING_SNAKE_CASE = "tabby, tabby cat"
SCREAMING_SNAKE_CASE = [
    "facebook/regnet-y-040",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer(tf.keras.layers.Layer):
    """Conv -> BatchNorm -> activation block with explicit zero padding.

    Fixes: attribute assignments lost their ``self.`` targets, the Keras
    layer names were corrupted (``ConvaD`` -> ``Conv2D``), the class name is
    restored to the one sibling classes in this file instantiate, and the
    forward method is renamed ``call`` so Keras dispatches to it.
    """

    def __init__(
        self,
        out_channels: int,
        kernel_size: int = 3,
        stride: int = 1,
        groups: int = 1,
        activation: Optional[str] = "relu",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels,
            kernel_size=kernel_size,
            strides=stride,
            padding='''VALID''',
            groups=groups,
            use_bias=False,
            name='''convolution''',
        )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name='''normalization''')
        self.activation = ACTaFN[activation] if activation is not None else tf.identity

    def call(self, hidden_state):
        hidden_state = self.convolution(self.padding(hidden_state))
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class TFRegNetEmbeddings(tf.keras.layers.Layer):
    """RegNet stem: embeds pixel values with a single strided conv layer.

    Fixes: ``self.`` attribute targets and the ``call`` method name were
    mangled; restored so the layer is functional.
    """

    def __init__(self, config: RegNetConfig, **kwargs) -> None:
        super().__init__(**kwargs)
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size,
            kernel_size=3,
            stride=2,
            activation=config.hidden_act,
            name='''embedder''',
        )

    def call(self, pixel_values):
        num_channels = shape_list(pixel_values)[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                '''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''')

        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
        hidden_state = self.embedder(pixel_values)
        return hidden_state
class TFRegNetShortCut(tf.keras.layers.Layer):
    """1x1 conv + batch norm used to project the residual branch when the
    feature shape changes.

    Fixes: ``self.`` attribute targets, ``Conv2D`` spelling and the ``call``
    method name were mangled; the class name is restored to the one the
    X/Y layers in this file instantiate.
    """

    def __init__(self, out_channels: int, stride: int = 2, **kwargs) -> None:
        super().__init__(**kwargs)
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels, kernel_size=1, strides=stride, use_bias=False, name='''convolution''')
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1e-5, momentum=0.9, name='''normalization''')

    def call(self, inputs: tf.Tensor, training: bool = False):
        return self.normalization(self.convolution(inputs), training=training)
class TFRegNetSELayer(tf.keras.layers.Layer):
    """Squeeze-and-excitation layer: global pool, bottleneck attention, rescale.

    Fixes: ``self.`` attribute targets, ``GlobalAveragePooling2D``/``Conv2D``
    spellings, the attention data flow and the ``call`` method name were
    mangled; restored.
    """

    def __init__(self, in_channels: int, reduced_channels: int, **kwargs) -> None:
        super().__init__(**kwargs)
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name='''pooler''')
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels, kernel_size=1, activation='''relu''', name='''attention.0'''),
            tf.keras.layers.Conv2D(filters=in_channels, kernel_size=1, activation='''sigmoid''', name='''attention.2'''),
        ]

    def call(self, hidden_state):
        # Squeeze spatial dims, compute per-channel attention, then rescale.
        pooled = self.pooler(hidden_state)
        for layer_module in self.attention:
            pooled = layer_module(pooled)
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer(tf.keras.layers.Layer):
    """RegNet X residual layer: 1x1 -> grouped 3x3 -> 1x1 convs plus shortcut.

    Fixes: locals/attributes were mangled to undefined placeholders and the
    forward method name is restored to ``call``.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs) -> None:
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name='''shortcut''')
            if should_apply_shortcut
            else tf.keras.layers.Activation('''linear''', name='''shortcut''')
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name='''layer.0'''),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name='''layer.1'''),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name='''layer.2'''),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def call(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class UpperCAmelCase_ ( tf.keras.layers.Layer ):
    """RegNet "Y" residual layer: an X layer with a Squeeze-and-Excitation block inserted.

    NOTE(review): parameter names restored from usage; the obfuscated original had
    duplicate parameter names and undefined `lowerCAmelCase_` references.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int, stride: int = 1, **kwargs) -> None:
        super().__init__(**kwargs)
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1, out_channels // config.groups_width)
        self.shortcut = (
            TFRegNetShortCut(out_channels, stride=stride, name="shortcut")
            if should_apply_shortcut
            else tf.keras.layers.Activation("linear", name="shortcut")
        )
        self.layers = [
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=config.hidden_act, name="layer.0"),
            TFRegNetConvLayer(
                out_channels, stride=stride, groups=groups, activation=config.hidden_act, name="layer.1"),
            # SE block squeezes to roughly a quarter of the *input* channel count.
            TFRegNetSELayer(out_channels, reduced_channels=int(round(in_channels / 4)), name="layer.2"),
            TFRegNetConvLayer(out_channels, kernel_size=1, activation=None, name="layer.3"),
        ]
        self.activation = ACTaFN[config.hidden_act]

    def A__(self, hidden_state):
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class UpperCAmelCase_ ( tf.keras.layers.Layer ):
    """A RegNet stage: `depth` X or Y layers; only the first layer may downsample.

    NOTE(review): parameter names restored from usage; the obfuscated original had
    duplicate parameter names and undefined `lowerCAmelCase_` references.
    """

    def __init__(self, config: RegNetConfig, in_channels: int, out_channels: int,
                 stride: int = 2, depth: int = 2, **kwargs) -> None:
        super().__init__(**kwargs)
        layer = TFRegNetXLayer if config.layer_type == "x" else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config, in_channels, out_channels, stride=stride, name="layers.0"),
            *[layer(config, out_channels, out_channels, name=f"layers.{i+1}") for i in range(depth - 1)],
        ]

    def A__(self, hidden_state):
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state)
        return hidden_state
class UpperCAmelCase_ ( tf.keras.layers.Layer ):
    """RegNet encoder: a sequence of stages built from the config's hidden sizes/depths.

    NOTE(review): parameter names restored from usage; the obfuscated original had
    duplicate parameter names and undefined `lowerCAmelCase_` references.
    """

    def __init__(self, config: RegNetConfig, **kwargs) -> None:
        super().__init__(**kwargs)
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config, config.embedding_size, config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0], name="stages.0", ) )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels, config.depths[1:])):
            self.stages.append(TFRegNetStage(config, in_channels, out_channels, depth=depth, name=f"stages.{i+1}"))

    def A__(self, hidden_state: tf.Tensor, output_hidden_states: bool = False, return_dict: bool = True):
        # Collect pre-stage activations when hidden states are requested.
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state, hidden_states=hidden_states)
@keras_serializable
class UpperCAmelCase_ ( tf.keras.layers.Layer ):
    """RegNet main layer: embedder -> encoder -> global average pooler.

    Outputs are transposed from TF's NHWC to NCHW for uniformity with the
    PyTorch modules.
    NOTE(review): names restored from usage; the obfuscated original had
    duplicate parameter names and undefined `lowerCAmelCase_` references.
    """

    # presumably `config_class` upstream — the obfuscated attribute name is kept.
    UpperCamelCase_ = RegNetConfig

    def __init__(self, config, **kwargs) -> None:
        super().__init__(**kwargs)
        self.config = config
        self.embedder = TFRegNetEmbeddings(config, name="embedder")
        self.encoder = TFRegNetEncoder(config, name="encoder")
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True, name="pooler")

    @unpack_inputs
    def A__(self, pixel_values: tf.Tensor, output_hidden_states=None, return_dict=None, training: bool = False):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values, training=training)
        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)
        # Change to NCHW output format have uniformity in the modules
        last_hidden_state = tf.transpose(last_hidden_state, perm=(0, 3, 1, 2))
        pooled_output = tf.transpose(pooled_output, perm=(0, 3, 1, 2))
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h, perm=(0, 3, 1, 2)) for h in encoder_outputs[1]])
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states, )
class UpperCAmelCase_ ( __UpperCAmelCase ):
    """RegNet pre-trained model stub: config class, model prefix, and input signature.

    NOTE(review): the three class attributes all carry the same obfuscated name
    (`UpperCamelCase_`, so later assignments shadow earlier ones); upstream they
    are `config_class`, `base_model_prefix` and `main_input_name` — confirm
    before use.
    """
    UpperCamelCase_ = RegNetConfig
    UpperCamelCase_ = '''regnet'''
    UpperCamelCase_ = '''pixel_values'''

    @property
    def A__(self):
        """Serving signature: a float32 NCHW image batch of size 224x224."""
        # `tf.floataa` in the obfuscated original is not a real dtype; tf.float32 restored.
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224), dtype=tf.float32)}
SCREAMING_SNAKE_CASE = r"\n Parameters:\n This model is a Tensorflow\n [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a\n regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and\n behavior.\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.\n"
SCREAMING_SNAKE_CASE = r"\n Args:\n pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConveNextImageProcessor.__call__`] for details.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
    '''The bare RegNet model outputting raw features without any specific head on top.''' , __UpperCAmelCase , )
class UpperCAmelCase_ ( __UpperCAmelCase ):
    """Bare TF RegNet model: a thin wrapper around the RegNet main layer.

    NOTE(review): the base class and docstring argument names are obfuscation
    artifacts (`__UpperCAmelCase`) and are kept verbatim; parameter/local names
    were restored from usage.
    """

    def __init__(self, config: RegNetConfig, *inputs, **kwargs) -> None:
        super().__init__(config, *inputs, **kwargs)
        self.regnet = TFRegNetMainLayer(config, name="regnet")

    @unpack_inputs
    # NOTE(review): `SCREAMING_SNAKE_CASE` holds the inputs docstring at this
    # point (the obfuscation collapsed all module constants to one name).
    @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC, modality="vision", expected_output=_EXPECTED_OUTPUT_SHAPE, )
    def A__(self, pixel_values: tf.Tensor, output_hidden_states=None, return_dict=None, training=False, ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values, output_hidden_states=output_hidden_states,
            return_dict=return_dict, training=training, )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state,
            pooler_output=outputs.pooler_output,
            hidden_states=outputs.hidden_states, )
@add_start_docstrings(
    '''\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ''' , __UpperCAmelCase , )
class UpperCAmelCase_ ( __UpperCAmelCase , __UpperCAmelCase ):
    """TF RegNet with an image-classification head (flatten + dense) on the pooled features.

    NOTE(review): both base classes carry the same obfuscated name — upstream
    they are the RegNet pre-trained model and the sequence-classification-loss
    mixin; as written `class C(X, X)` would fail at runtime. Confirm and restore
    distinct names.
    """

    def __init__(self, config: RegNetConfig, *inputs, **kwargs) -> None:
        super().__init__(config, *inputs, **kwargs)
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config, name="regnet")
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels, name="classifier.1") if config.num_labels > 0 else tf.identity,
        ]

    @unpack_inputs
    # NOTE(review): `SCREAMING_SNAKE_CASE` holds the inputs docstring at this
    # point (the obfuscation collapsed all module constants to one name).
    @add_start_docstrings_to_model_forward(SCREAMING_SNAKE_CASE)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT, output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC, expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT, )
    def A__(self, pixel_values: tf.Tensor = None, labels: tf.Tensor = None,
            output_hidden_states=None, return_dict=None, training=False, ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training)
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output)
        logits = self.classifier[1](flattened_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
| 711 |
'''simple docstring'''
from __future__ import annotations
# Coulomb's constant, units = N * m^2 * C^-2. Both names are kept: the original
# file references COULOMBS_CONSTANT inside the function below.
SCREAMING_SNAKE_CASE = COULOMBS_CONSTANT = 8.988E9
def lowercase_ ( force : float , chargea : float , chargeb : float , distance : float ) -> dict[str, float]:
    """Solve Coulomb's law for the single argument given as 0.

    Exactly one of (force, chargea, chargeb, distance) must be 0; that quantity
    is computed from the other three and returned in a one-entry dict keyed
    "force" / "charge1" / "charge2" / "distance".

    Raises:
        ValueError: if not exactly one argument is 0, or if distance is negative.
    """
    charge_product = abs(chargea * chargeb)
    if (force, chargea, chargeb, distance).count(0) != 1:
        raise ValueError('''One and only one argument must be 0''')
    if distance < 0:
        raise ValueError('''Distance cannot be negative''')
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif chargea == 0:
        chargea = abs(force) * (distance**2) / (COULOMBS_CONSTANT * chargeb)
        return {"charge1": chargea}
    elif chargeb == 0:
        chargeb = abs(force) * (distance**2) / (COULOMBS_CONSTANT * chargea)
        return {"charge2": chargeb}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError('''Exactly one argument must be 0''')
# Run this module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 8 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
# NOTE(review): the obfuscation bound every module constant below to the same
# name, so each assignment shadows the previous one. The conversion functions
# later in the file reference the original names (MAPPING_S2T, IGNORE_KEYS_S2T,
# logger, ...) which are never defined here — restore distinct names before
# running this script.
SCREAMING_SNAKE_CASE = logging.get_logger('transformers.models.speecht5')
# fairseq -> HF key map: speech-encoder prenet
SCREAMING_SNAKE_CASE = {
    'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm',
    'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection',
    'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv',
    'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed',
}
# fairseq -> HF key map: text-encoder prenet
SCREAMING_SNAKE_CASE = {
    'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens',
    'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha',
}
# fairseq -> HF key map: speech-decoder prenet
SCREAMING_SNAKE_CASE = {
    'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0',
    'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1',
    'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer',
    'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha',
    'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer',
}
# fairseq -> HF key map: speech-decoder postnet
SCREAMING_SNAKE_CASE = {
    'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out',
    'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out',
    'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv',
    'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm',
    'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv',
    'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm',
    'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv',
    'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm',
    'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv',
    'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm',
    'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv',
    'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm',
}
# fairseq -> HF key map: text-decoder prenet
SCREAMING_SNAKE_CASE = {
    'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens',
}
# fairseq -> HF key map: text-decoder postnet
SCREAMING_SNAKE_CASE = {
    'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head',
}
# fairseq -> HF key map: shared transformer encoder
SCREAMING_SNAKE_CASE = {
    'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj',
    'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj',
    'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj',
    'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj',
    'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm',
    'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense',
    'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense',
    'encoder.layers.*.final_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm',
    'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k',
}
# fairseq -> HF key map: shared transformer decoder
SCREAMING_SNAKE_CASE = {
    'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj',
    'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj',
    'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj',
    'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj',
    'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm',
    'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj',
    'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj',
    'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj',
    'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj',
    'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm',
    'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense',
    'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense',
    'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm',
}
# Combined per-task maps (originally MAPPING_S2T / MAPPING_T2S / MAPPING_S2S).
# The MAPPING_* names they merge are never bound in this obfuscated copy.
SCREAMING_SNAKE_CASE = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
SCREAMING_SNAKE_CASE = {
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
SCREAMING_SNAKE_CASE = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
SCREAMING_SNAKE_CASE = []
# Base list of checkpoint keys never converted; task-specific lists extend it.
SCREAMING_SNAKE_CASE = [
    'encoder.version',
    'encoder.layers.*.norm_k.weight',
    'encoder.layers.*.norm_k.bias',
    'decoder.version',
    'decoder.layers.*.norm_k.weight',
    'decoder.layers.*.norm_k.bias',
    'decoder.pos_emb.pe_k',
    'speech_encoder_prenet.embed_positions._float_tensor',
    'text_decoder_prenet.embed_positions._float_tensor',
]
SCREAMING_SNAKE_CASE = IGNORE_KEYS + [
    'encoder.proj',
    'text_encoder_prenet.*',
    'speech_decoder_prenet.*',
    'speech_decoder_postnet.*',
]
SCREAMING_SNAKE_CASE = IGNORE_KEYS + [
    'encoder.proj',
    'speech_encoder_prenet.*',
    'text_decoder_prenet.*',
    'text_decoder_postnet.*',
]
SCREAMING_SNAKE_CASE = IGNORE_KEYS + [
    'encoder.proj',
    'text_encoder_prenet.*',
    'text_decoder_prenet.*',
    'text_decoder_postnet.*',
]
def lowercase_ ( hf_pointer , key , value , full_name , weight_type ) -> None:
    """Copy `value` into the attribute of `hf_pointer` addressed by dotted `key`.

    `weight_type` selects which tensor slot (weight / weight_g / bias / ...) is
    written; when None the pointer's own `.data` is overwritten. Raises
    ValueError on a shape mismatch. Logs via the module-level `logger`.
    NOTE(review): parameter/local names restored — the obfuscated original had
    duplicate `__A` parameters and undefined `a_` references.
    """
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f' {value.shape} for {full_name}' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value
    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.' )
def lowercase_ ( name : str , ignore_keys : list ) -> bool:
    """Return True if checkpoint key `name` matches any pattern in `ignore_keys`.

    Patterns ending in ".*" match as prefixes; patterns containing ".*." match
    when both the prefix and suffix occur in the name; plain patterns match as
    substrings.
    NOTE(review): parameter names restored — the obfuscated original declared
    both parameters as `__A` and collapsed the prefix/suffix unpack.
    """
    for key in ignore_keys:
        if key.endswith('''.*''' ):
            if name.startswith(key[:-1] ):
                return True
        elif ".*." in key:
            prefix, suffix = key.split('''.*.''' )
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
def lowercase_ ( fairseq_dict , hf_model , task ) -> None:
    """Map every tensor in a fairseq state dict onto `hf_model` for the given task.

    Conv feature-extractor tensors go through `load_conv_layer`; everything else
    is matched against the task's key mapping and written with `set_recursively`.
    Unmatched keys are collected and logged as a warning.
    NOTE(review): names restored from usage; the module-level constants this
    function reads (MAPPING_S2T, IGNORE_KEYS_S2T, ..., logger) and the helpers
    it calls (should_ignore, load_conv_layer, set_recursively) are not bound
    under these names in this obfuscated copy — confirm against the upstream
    conversion script.
    """
    unused_weights = []
    if task == "s2t":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speechta.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f'Unsupported task: {task}' )
    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f'{name} was ignored' )
            continue
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split('''.*.''' )
                    if prefix in name and suffix in name:
                        key = suffix
                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        # Recover the layer index from the checkpoint key.
                        layer_index = name.split(key)[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index)
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    elif "running_mean" in name:
                        weight_type = '''running_mean'''
                    elif "running_var" in name:
                        weight_type = '''running_var'''
                    elif "num_batches_tracked" in name:
                        weight_type = '''num_batches_tracked'''
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f'Unused weights: {unused_weights}' )
def lowercase_ ( full_name , value , feature_extractor , unused_weights , use_group_norm ) -> None:
    """Load one conv-feature-extractor tensor from the checkpoint into `feature_extractor`.

    The checkpoint key encodes `conv_layers.<layer_id>.<type_id>...`; type 0 is
    the conv itself, type 2 the (group/layer) norm. Anything else is recorded in
    `unused_weights`. Raises ValueError on a shape mismatch; logs via `logger`.
    NOTE(review): parameter/local names restored — the obfuscated original had
    duplicate `__A` parameters and collapsed locals.
    """
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f'{full_name} has size {value.shape}, but'
                    f' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def lowercase_ ( task , checkpoint_path , pytorch_dump_folder_path , config_path=None , vocab_path=None , repo_id=None , ) -> None:
    """Convert a fairseq SpeechT5 checkpoint to the HF format and optionally push it.

    `task` is one of "s2t", "t2s", "s2s" and selects the model class and length
    limits. Saves processor and model to `pytorch_dump_folder_path`.
    NOTE(review): parameter/local names restored — the obfuscated original had
    duplicate `__A` parameters and collapsed locals. As in the original, when
    `vocab_path` is falsy the `tokenizer` local is never bound and the processor
    construction below fails — confirm intended behavior upstream.
    """
    if config_path is not None:
        config = SpeechTaConfig.from_pretrained(config_path)
    else:
        config = SpeechTaConfig()
    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechTaForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1_8_7_6
        config.max_text_positions = 6_0_0
        config.max_length = config.max_speech_positions
        model = SpeechTaForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1_8_7_6
        config.max_length = config.max_speech_positions
        model = SpeechTaForSpeechToSpeech(config)
    else:
        raise ValueError(f'Unknown task name: {task}' )
    if vocab_path:
        tokenizer = SpeechTaTokenizer(vocab_path, model_max_length=config.max_text_positions)
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken('''<mask>''' , lstrip=True , rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({'''mask_token''': mask_token} )
        tokenizer.add_tokens(['''<ctc_blank>'''] )
    feature_extractor = SpeechTaFeatureExtractor()
    processor = SpeechTaProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)
    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint['''model'''] , model , task)
    model.save_pretrained(pytorch_dump_folder_path)
    if repo_id:
        print('''Pushing to the hub...''' )
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
# Script entry point: parse CLI options and run the conversion.
# NOTE(review): obfuscation bound the parser and parsed args to
# SCREAMING_SNAKE_CASE while the code below references `parser`/`args`, and
# `convert_speechta_checkpoint` is never defined under that name in this copy —
# restore the original bindings before running.
if __name__ == "__main__":
    SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
    parser.add_argument(
        '--task',
        default='s2t',
        type=str,
        help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.',
    )
    parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
    )
    parser.add_argument(
        '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
    )
    SCREAMING_SNAKE_CASE = parser.parse_args()
    convert_speechta_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
| 712 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
# NOTE(review): all module constants below are bound to the same obfuscated
# name, so each assignment shadows the previous one; the tokenizer class later
# references VOCAB_FILES_NAMES etc., which are never bound under those names.
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# Expected on-disk vocabulary file names.
SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
SCREAMING_SNAKE_CASE = {
    'vocab_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
    },
    'merges_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
    },
}
# Maximum positional-embedding sizes per pretrained checkpoint.
SCREAMING_SNAKE_CASE = {
    'allenai/led-base-16384': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowercase_ ( ) -> dict:
    """Map every byte (0-255) to a unique, printable unicode character.

    Printable bytes map to themselves; the remaining bytes are shifted up past
    chr(255) so that no mapping target is whitespace or a control character.
    The result lets byte-level BPE operate on reversible unicode strings.
    NOTE(review): local names restored — the obfuscated original collapsed
    `bs`/`cs`/`n` into one name, making the body a NameError.
    """
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(c) for c in cs]
    return dict(zip(bs, cs))
def lowercase_ ( word ) -> set:
    """Return the set of adjacent symbol pairs in `word` (a string or tuple of symbols).

    NOTE(review): local names restored — the obfuscated original collapsed
    `pairs`/`prev_char` into one name, making the body a NameError.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class UpperCAmelCase_ ( __A ):
    """Byte-level BPE tokenizer (LED-style).

    NOTE(review): the base class `__A` and the module constants referenced below
    (VOCAB_FILES_NAMES, ...) are obfuscation artifacts and are not bound under
    these names in this copy; upstream the base is `PreTrainedTokenizer`. Also
    note all four class attributes share one obfuscated name, so later
    assignments shadow earlier ones — confirm against the upstream file.
    """
    UpperCamelCase_ = VOCAB_FILES_NAMES
    UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase_ = ['''input_ids''', '''attention_mask''']
def __init__( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : str="replace" , UpperCAmelCase : int="<s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : List[Any]="<s>" , UpperCAmelCase : str="<unk>" , UpperCAmelCase : Dict="<pad>" , UpperCAmelCase : Union[str, Any]="<mask>" , UpperCAmelCase : str=False , **UpperCAmelCase : int , ) -> Dict:
'''simple docstring'''
lowercase : int =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else bos_token
lowercase : Union[str, Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else eos_token
lowercase : str =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else sep_token
lowercase : Optional[int] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else cls_token
lowercase : Union[str, Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else unk_token
lowercase : List[Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
lowercase : Any =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
super().__init__(
errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , **UpperCAmelCase , )
with open(UpperCAmelCase , encoding='''utf-8''' ) as vocab_handle:
lowercase : str =json.load(UpperCAmelCase )
lowercase : Optional[int] ={v: k for k, v in self.encoder.items()}
lowercase : Optional[int] =errors # how to handle errors in decoding
lowercase : Tuple =bytes_to_unicode()
lowercase : int ={v: k for k, v in self.byte_encoder.items()}
with open(UpperCAmelCase , encoding='''utf-8''' ) as merges_handle:
lowercase : Union[str, Any] =merges_handle.read().split('''\n''' )[1:-1]
lowercase : Optional[Any] =[tuple(merge.split() ) for merge in bpe_merges]
lowercase : Optional[int] =dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
lowercase : Optional[int] ={}
lowercase : Any =add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
lowercase : str =re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def A__ ( self : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return len(self.encoder )
def A__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def A__ ( self : int , UpperCAmelCase : str ) -> Optional[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
lowercase : List[str] =tuple(UpperCAmelCase )
lowercase : List[str] =get_pairs(UpperCAmelCase )
if not pairs:
return token
while True:
lowercase : Tuple =min(UpperCAmelCase , key=lambda UpperCAmelCase : self.bpe_ranks.get(UpperCAmelCase , float('''inf''' ) ) )
if bigram not in self.bpe_ranks:
break
lowercase , lowercase : Optional[int] =bigram
lowercase : Union[str, Any] =[]
lowercase : Optional[Any] =0
while i < len(UpperCAmelCase ):
try:
lowercase : Dict =word.index(UpperCAmelCase , UpperCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowercase : Optional[int] =j
if word[i] == first and i < len(UpperCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowercase : List[str] =tuple(UpperCAmelCase )
lowercase : str =new_word
if len(UpperCAmelCase ) == 1:
break
else:
lowercase : Optional[Any] =get_pairs(UpperCAmelCase )
lowercase : Optional[Any] =''' '''.join(UpperCAmelCase )
lowercase : Union[str, Any] =word
return word
def A__ ( self : int , UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : Dict =[]
for token in re.findall(self.pat , UpperCAmelCase ):
lowercase : Optional[int] =''''''.join(
self.byte_encoder[b] for b in token.encode('''utf-8''' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase ).split(''' ''' ) )
return bpe_tokens
def A__ ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
return self.encoder.get(UpperCAmelCase , self.encoder.get(self.unk_token ) )
def A__ ( self : Dict , UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
return self.decoder.get(UpperCAmelCase )
def A__ ( self : List[str] , UpperCAmelCase : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : str =''''''.join(UpperCAmelCase )
lowercase : Dict =bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
return text
def A__ ( self : Any , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(UpperCAmelCase ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
lowercase : Optional[Any] =os.path.join(
UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
lowercase : List[Any] =os.path.join(
UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase , ensure_ascii=UpperCAmelCase ) + '''\n''' )
lowercase : List[str] =0
with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
writer.write('''#version: 0.2\n''' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
''' Please check that the tokenizer is not corrupted!''' )
lowercase : Any =token_index
writer.write(''' '''.join(UpperCAmelCase ) + '''\n''' )
index += 1
return vocab_file, merge_file
def A__ ( self : str , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowercase : Optional[int] =[self.cls_token_id]
lowercase : List[Any] =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A__ ( self : Optional[int] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : bool = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase )) + [1]
return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase )) + [1]
def A__ ( self : Any , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
'''simple docstring'''
lowercase : Dict =[self.sep_token_id]
lowercase : Optional[int] =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def A__ ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=False , **UpperCAmelCase : Optional[Any] ) -> str:
'''simple docstring'''
lowercase : Tuple =kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase ) > 0 and not text[0].isspace()):
lowercase : Union[str, Any] =''' ''' + text
return (text, kwargs)
def A__ ( self : Any , UpperCAmelCase : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , ) -> dict:
'''simple docstring'''
lowercase : Optional[int] =super()._pad(
encoded_inputs=UpperCAmelCase , max_length=UpperCAmelCase , padding_strategy=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
# Load from model defaults
if return_attention_mask is None:
lowercase : Tuple ='''attention_mask''' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowercase : Optional[Any] =encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowercase : str =len(encoded_inputs['''global_attention_mask'''] ) != len(UpperCAmelCase )
if needs_to_be_padded:
lowercase : Tuple =len(UpperCAmelCase ) - len(encoded_inputs['''global_attention_mask'''] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowercase : List[str] =(
encoded_inputs['''global_attention_mask'''] + [-1] * difference
)
elif self.padding_side == "left":
lowercase : Any =[-1] * difference + encoded_inputs[
'''global_attention_mask'''
]
else:
raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
return encoded_inputs
| 8 | 0 |
'''simple docstring'''
# fmt: off
# International Morse code table (ITU-R M.1677-1), plus a few common
# punctuation marks.  A space is encoded as "/" so words survive a round trip.
MORSE_CODE_DICT = {
    'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.', 'G': '--.',
    'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..', 'M': '--', 'N': '-.',
    'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.', 'S': '...', 'T': '-', 'U': '..-',
    'V': '...-', 'W': '.--', 'X': '-..-', 'Y': '-.--', 'Z': '--..', '1': '.----',
    '2': '..---', '3': '...--', '4': '....-', '5': '.....', '6': '-....', '7': '--...',
    '8': '---..', '9': '----.', '0': '-----', '&': '.-...', '@': '.--.-.',
    ':': '---...', ',': '--..--', '.': '.-.-.-', '\'': '.----.', '"': '.-..-.',
    '?': '..--..', '/': '-..-.', '=': '-...-', '+': '.-.-.', '-': '-....-',
    '(': '-.--.', ')': '-.--.-', '!': '-.-.--', ' ': '/'
}  # Exclamation mark is not in ITU-R recommendation
# fmt: on

# Inverse table used for decoding.
REVERSE_DICT = {value: key for key, value in MORSE_CODE_DICT.items()}


def encrypt(message: str) -> str:
    """Encode *message* into Morse code, one space between symbols.

    Raises KeyError for characters outside the table.
    """
    return " ".join(MORSE_CODE_DICT[char] for char in message.upper())


def decrypt(message: str) -> str:
    """Decode a space-separated Morse code string produced by :func:`encrypt`."""
    return "".join(REVERSE_DICT[char] for char in message.split())


def main() -> None:
    """Demonstrate a full encrypt/decrypt round trip on a sample message."""
    message = "Morse code here!"
    print(message)
    message = encrypt(message)
    print(message)
    message = decrypt(message)
    print(message)


if __name__ == "__main__":
    main()
| 713 |
'''simple docstring'''
import cmath

import mpmath  # for roots of unity
import numpy as np
class UpperCAmelCase_:
    """Multiply two polynomials with the fast Fourier transform.

    Coefficients are given lowest degree first.  After construction,
    ``self.product`` holds the coefficient list of ``poly_a * poly_b``
    (rounded to 8 decimal places).
    """

    def __init__(self, poly_a=None, poly_b=None) -> None:
        # Copy the inputs so the caller's lists are never mutated.
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients.  Keep at least one entry so the
        # all-zero polynomial does not crash the strip loop.
        while self.polyA[-1] == 0 and len(self.polyA) > 1:
            self.polyA.pop()
        self.len_A = len(self.polyA)
        while self.polyB[-1] == 0 and len(self.polyB) > 1:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Pad with zeros so the length is a power of 2 no smaller than
        # deg(A*B) + 1 (the product has len_A + len_B - 1 coefficients).
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )
        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # Primitive c_max_length-th root of unity used by the transform.
        self.root = cmath.exp(2j * cmath.pi / self.c_max_length)

        # The product
        self.product = self.__multiply()

    def __dft(self, which: str) -> list:
        """Iterative radix-2 DFT of polynomial ``'A'`` or ``'B'``.

        Returns the list of point evaluations at the powers of ``self.root``.
        """
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case: a single coefficient is its own transform.
        if len(dft) <= 1:
            return dft[0]
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for _ in range(next_ncol)]
            root = self.root ** next_ncol
            # First half of next step (butterfly sums)
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step (butterfly differences)
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self) -> list:
        """Pointwise-multiply the two DFTs, then invert the transform."""
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b
        # Corner Case: a single point needs no inverse transform.
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for _ in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack; rounding to 8 decimals removes the tiny float error of the FFT.
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]
        # Remove trailing zero coefficients (keep at least one entry).
        while inverce_c[-1] == 0 and len(inverce_c) > 1:
            inverce_c.pop()
        return inverce_c

    def __str__(self) -> str:
        """Human-readable dump of A, B and the product A*B."""
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )
        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
    # Run any doctests defined in this module when executed directly.
    import doctest
    doctest.testmod()
| 8 | 0 |
'''simple docstring'''
def xnor_gate(input_a: int, input_b: int) -> int:
    """Return the XNOR of two bits: 1 when the inputs are equal, else 0.

    >>> xnor_gate(0, 0)
    1
    >>> xnor_gate(0, 1)
    0
    """
    return 1 if input_a == input_b else 0
def test_xnor_gate() -> None:
    """Exhaustively check the XNOR truth table."""
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1
if __name__ == "__main__":
    # Print the full XNOR truth table when run as a script.
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
| 714 |
'''simple docstring'''
def logical_left_shift(number: int, shift_amount: int) -> str:
    """Shift *number* left by *shift_amount* bits; return the binary string.

    >>> logical_left_shift(13, 2)
    '0b110100'

    Raises:
        ValueError: if either argument is negative.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))
    # Appending zeros on the right is exactly a logical left shift.
    binary_number += "0" * shift_amount
    return binary_number
def logical_right_shift(number: int, shift_amount: int) -> str:
    """Shift *number* right by *shift_amount* bits, filling with zeros.

    >>> logical_right_shift(13, 2)
    '0b11'

    Raises:
        ValueError: if either argument is negative.
    """
    if number < 0 or shift_amount < 0:
        raise ValueError("both inputs must be positive integers")

    binary_number = str(bin(number))[2:]
    # Shifting out every bit leaves zero.
    if shift_amount >= len(binary_number):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number) - shift_amount]
    return "0b" + shifted_binary_number
def arithmetic_right_shift(number: int, shift_amount: int) -> str:
    """Shift *number* right by *shift_amount* bits, replicating the sign bit.

    Negative numbers are rendered in two's complement with an explicit sign bit.

    >>> arithmetic_right_shift(-13, 2)
    '0b11100'
    """
    if number >= 0:  # Get binary representation of positive number
        binary_number = "0" + str(bin(number)).strip("-")[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number)[3:])  # Find 2's complement of number
        binary_number = bin(abs(number) - (1 << binary_number_length))[3:]
        binary_number = (
            "1" + "0" * (binary_number_length - len(binary_number)) + binary_number
        )

    # When every bit shifts out, the result is all sign bits.
    if shift_amount >= len(binary_number):
        return "0b" + binary_number[0] * len(binary_number)
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number) - shift_amount]
    )
if __name__ == "__main__":
    # Execute the doctest examples embedded in the functions above.
    import doctest
    doctest.testmod()
| 8 | 0 |
'''simple docstring'''
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    """Return True if the digits (or characters) of *n* read the same backwards.

    >>> is_palindrome(121)
    True
    >>> is_palindrome(123)
    False
    """
    s = str(n)
    return s == s[::-1]


def solution(limit: int = 1_0_0_0_0_0_0) -> int:
    """Project Euler 36: sum of numbers below *limit* that are palindromic
    in both base 10 and base 2.

    >>> solution(10)
    25
    """
    total = 0
    for i in range(1, limit):
        # bin(i) looks like '0b101'; keep only the digits after the 'b'.
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
    # Read a limit from stdin and print the double-base palindrome sum.
    print(solution(int(str(input().strip()))))
| 715 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class UpperCAmelCase_ :
        """Stub standing in for the image type when the vision extras (PIL) are unavailable."""

        @staticmethod
        def A__ ( *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[int] ) -> Any:
            """No-op placeholder; real image handling requires Pillow to be installed."""
            pass
def lowercase_ ( __A : Union[str, Any] ) -> List[Any]:
    """Placeholder hashing helper used by the test suite; always yields ``None``."""
    placeholder = None
    return placeholder
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.  The tests below reference it as INVOICE_URL.
INVOICE_URL = (
    'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
    """Pipeline tests for ``document-question-answering``.

    Covers LayoutLMv2 (with detectron2 + pytesseract), LayoutLM (word-box
    based), and Donut checkpoints, checking scores/answers against pinned
    expected values on the invoice fixture image.
    """

    UpperCamelCase_ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING

    @require_pytesseract
    @require_vision
    def A__ ( self : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : List[str] ) -> Tuple:
        """Build a document-QA pipeline plus three equivalent example payloads
        (loaded image, raw image, and image with precomputed word boxes)."""
        lowercase : Dict =pipeline(
            '''document-question-answering''' , model=UpperCAmelCase , tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )

        lowercase : Optional[Any] =INVOICE_URL
        # Precompute OCR word boxes so one example can bypass OCR entirely.
        lowercase : Any =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
        lowercase : Dict ='''What is the placebo?'''
        lowercase : Optional[Any] =[
            {
                '''image''': load_image(UpperCAmelCase ),
                '''question''': question,
            },
            {
                '''image''': image,
                '''question''': question,
            },
            {
                '''image''': image,
                '''question''': question,
                '''word_boxes''': word_boxes,
            },
        ]
        return dqa_pipeline, examples

    def A__ ( self : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ) -> Optional[int]:
        """Run the pipeline on the prepared examples and sanity-check that each
        returns top_k answer dicts with the expected keys."""
        lowercase : Dict =dqa_pipeline(UpperCAmelCase , top_k=2 )
        self.assertEqual(
            UpperCAmelCase , [
                [
                    {'''score''': ANY(UpperCAmelCase ), '''answer''': ANY(UpperCAmelCase ), '''start''': ANY(UpperCAmelCase ), '''end''': ANY(UpperCAmelCase )},
                    {'''score''': ANY(UpperCAmelCase ), '''answer''': ANY(UpperCAmelCase ), '''start''': ANY(UpperCAmelCase ), '''end''': ANY(UpperCAmelCase )},
                ]
            ]
            * 3 , )

    @require_torch
    @require_detectrona
    @require_pytesseract
    def A__ ( self : int ) -> Any:
        """Smoke-test a tiny random LayoutLMv2 checkpoint, including an image
        with no detectable text and explicit empty words/boxes."""
        lowercase : Dict =pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
        lowercase : Union[str, Any] =INVOICE_URL
        lowercase : Tuple ='''How many cats are there?'''

        lowercase : Optional[int] =[
            {'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
            {'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
        ]
        lowercase : Optional[Any] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase )

        lowercase : List[str] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase )

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        lowercase : Optional[Any] ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        lowercase : Any =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(UpperCAmelCase , [] )

        # We can optionally pass directly the words and bounding boxes
        lowercase : int ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        lowercase : Dict =[]
        lowercase : str =[]
        lowercase : str =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , words=UpperCAmelCase , boxes=UpperCAmelCase , top_k=2 )
        self.assertEqual(UpperCAmelCase , [] )

    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def A__ ( self : int ) -> Any:
        """Check the fine-tuned LayoutLMv2 DocVQA checkpoint against pinned
        answers, for single, dict and batched inputs."""
        lowercase : Union[str, Any] =pipeline(
            '''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
        lowercase : Dict =INVOICE_URL
        lowercase : str ='''What is the invoice number?'''

        lowercase : List[str] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )

        lowercase : List[Any] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )

        lowercase : List[Any] =dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                [
                    {'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ],
            ]
            * 2 , )

    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def A__ ( self : Dict ) -> List[str]:
        """Same LayoutLMv2 checkpoint but with max_seq_len=50, which changes
        the top-ranked answer."""
        lowercase : str =pipeline(
            '''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
        lowercase : Dict =INVOICE_URL
        lowercase : Any ='''What is the invoice number?'''

        lowercase : Union[str, Any] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                {'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )

        lowercase : int =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                {'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )

        lowercase : Any =dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                [
                    {'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                    {'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ]
            ]
            * 2 , )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def A__ ( self : str ) -> Dict:
        """Check the LayoutLM (v1) DocVQA checkpoint, including the word-box
        path where no image is supplied at all."""
        lowercase : Any =AutoTokenizer.from_pretrained(
            '''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase )
        lowercase : Tuple =pipeline(
            '''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase , revision='''3dc6de3''' , )
        lowercase : Tuple =INVOICE_URL
        lowercase : Any ='''What is the invoice number?'''

        lowercase : Dict =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ] , )

        lowercase : Any =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ] , )

        lowercase : str =dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                [
                    {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                ]
            ]
            * 2 , )

        lowercase : Tuple =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )

        # This model should also work if `image` is set to None
        lowercase : Dict =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ] , )

    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def A__ ( self : Dict ) -> Any:
        """LayoutLM (v1) with max_seq_len=50; also exercises the image=None
        word-box path."""
        lowercase : Dict =AutoTokenizer.from_pretrained(
            '''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase )
        lowercase : List[Any] =pipeline(
            '''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase , revision='''3dc6de3''' , max_seq_len=50 , )
        lowercase : str =INVOICE_URL
        lowercase : int ='''What is the invoice number?'''

        lowercase : Tuple =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )

        lowercase : Union[str, Any] =dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                [
                    {'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ]
            ]
            * 2 , )

        lowercase : List[str] =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )

        # This model should also work if `image` is set to None
        lowercase : Union[str, Any] =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )

    @slow
    @require_torch
    def A__ ( self : List[Any] ) -> Optional[int]:
        """Donut is OCR-free: only the generated answer string is compared."""
        lowercase : str =pipeline(
            '''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )

        lowercase : Any =INVOICE_URL
        lowercase : Union[str, Any] ='''What is the invoice number?'''
        lowercase : int =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , [{'''answer''': '''us-001'''}] )

    @require_tf
    @unittest.skip('''Document question answering not implemented in TF''' )
    def A__ ( self : Any ) -> Any:
        """Placeholder: no TensorFlow implementation of this pipeline exists."""
        pass
| 8 | 0 |
'''simple docstring'''
from math import pow
def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple:
    """Recursively count representations of ``needed_sum`` as a sum of
    distinct natural numbers each raised to ``power``.

    Returns the running ``current_sum`` (restored on unwind) and the updated
    ``solutions_count``.
    """
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solution(needed_sum: int, power: int) -> int:
    """Count the ways ``needed_sum`` can be written as a sum of distinct
    natural numbers raised to ``power``.

    >>> solution(13, 2)
    1

    Raises:
        ValueError: unless 1 <= needed_sum <= 1000 and 2 <= power <= 10.
    """
    if not (1 <= needed_sum <= 1_0_0_0 and 2 <= power <= 1_0):
        raise ValueError(
            '''Invalid input\n'''
            '''needed_sum must be between 1 and 1000, power between 2 and 10.''' )

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
    # Run the doctest examples embedded in the functions above.
    import doctest
    doctest.testmod()
| 716 |
'''simple docstring'''
def decimal_isolate(number: float, digit_amount: int) -> float:
    """Isolate the fractional part of *number*.

    When ``digit_amount`` > 0 the fractional part is rounded to that many
    decimal digits; otherwise the raw float difference is returned.

    >>> decimal_isolate(35.345, 2)
    0.34
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
    # Demonstrate decimal_isolate on positive, negative and zero inputs.
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
| 8 | 0 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
# Issues carrying any of these labels are never auto-closed or marked stale.
LABELS_TO_EXEMPT = [
    "good first issue",
    "feature request",
    "wip",
]
def main() -> None:
    """Close or nag stale open issues on huggingface/accelerate.

    An issue is closed when the most recent comment is from the bot and the
    issue has been idle for more than 7 days (and is at least 30 days old).
    Otherwise, an issue idle for more than 23 days receives a stale warning.
    Issues carrying an exempt label are always skipped.

    Requires the ``GITHUB_TOKEN`` environment variable.
    """
    g = Github(os.environ['''GITHUB_TOKEN'''])
    repo = g.get_repo('''huggingface/accelerate''')
    open_issues = repo.get_issues(state='''open''')

    for issue in open_issues:
        # Newest comment first.
        comments = sorted(
            [comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True
        )
        last_comment = comments[0] if len(comments) > 0 else None
        current_time = dt.utcnow()
        days_since_updated = (current_time - issue.updated_at).days
        days_since_creation = (current_time - issue.created_at).days
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and days_since_updated > 7
            and days_since_creation >= 3_0
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # Close issue since it has been 7 days of inactivity since bot mention.
            issue.edit(state='''closed''' )
        elif (
            days_since_updated > 2_3
            and days_since_creation >= 3_0
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
        ):
            # Add stale comment
            issue.create_comment(
                '''This issue has been automatically marked as stale because it has not had '''
                '''recent activity. If you think this still needs to be addressed '''
                '''please comment on this thread.\n\nPlease note that issues that do not follow the '''
                '''[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '''
                '''are likely to be ignored.''' )
if __name__ == "__main__":
    # Entry point for the scheduled GitHub Action.
    main()
| 717 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale an absolute (left, top, right, bottom) pixel box to the 0-1000
    coordinate grid LayoutLM expects.

    Args:
        box: sequence of four pixel coordinates (left, top, right, bottom).
        width: image width in pixels.
        height: image height in pixels.
    """
    return [
        int(1_0_0_0 * (box[0] / width) ),
        int(1_0_0_0 * (box[1] / height) ),
        int(1_0_0_0 * (box[2] / width) ),
        int(1_0_0_0 * (box[3] / height) ),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Run Tesseract OCR on *image* and return ``(words, normalized_boxes)``.

    Boxes are normalized to the 0-1000 grid via :func:`normalize_box`.

    Args:
        image: the input image as a numpy array.
        lang: optional Tesseract language code (``None`` = default).
        tesseract_config: extra flags forwarded to Tesseract.
    """
    # apply OCR on a PIL version of the image
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='''dict''', config=tesseract_config)
    words, left, top, width, height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']

    # filter empty words and corresponding coordinates (set gives O(1) membership checks)
    irrelevant_indices = {idx for idx, word in enumerate(words) if not word.strip()}
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class UpperCAmelCase_ ( __A ):
"""simple docstring"""
UpperCamelCase_ = ['''pixel_values''']
def __init__( self : List[Any] , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : bool = True , UpperCAmelCase : float = 1 / 255 , UpperCAmelCase : bool = True , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = "" , **UpperCAmelCase : Tuple , ) -> None:
'''simple docstring'''
super().__init__(**UpperCAmelCase )
lowercase : Tuple =size if size is not None else {'''height''': 224, '''width''': 224}
lowercase : Optional[Any] =get_size_dict(UpperCAmelCase )
lowercase : Optional[Any] =do_resize
lowercase : List[Any] =size
lowercase : List[str] =resample
lowercase : Dict =do_rescale
lowercase : str =rescale_value
lowercase : Optional[int] =do_normalize
lowercase : Any =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase : Union[str, Any] =image_std if image_std is not None else IMAGENET_STANDARD_STD
lowercase : List[Any] =apply_ocr
lowercase : Union[str, Any] =ocr_lang
lowercase : str =tesseract_config
def A__ ( self : Dict , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
lowercase : Tuple =get_size_dict(UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
lowercase : Optional[Any] =(size['''height'''], size['''width'''])
return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[int, float] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> np.ndarray:
'''simple docstring'''
return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[float, Iterable[float]] , UpperCAmelCase : Union[float, Iterable[float]] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[int] , ) -> np.ndarray:
'''simple docstring'''
return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )
def A__ ( self : Any , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase : List[str] , ) -> PIL.Image.Image:
'''simple docstring'''
lowercase : Optional[int] =do_resize if do_resize is not None else self.do_resize
lowercase : Tuple =size if size is not None else self.size
lowercase : Optional[int] =get_size_dict(UpperCAmelCase )
lowercase : List[str] =resample if resample is not None else self.resample
lowercase : List[Any] =do_rescale if do_rescale is not None else self.do_rescale
lowercase : List[Any] =rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : Optional[int] =do_normalize if do_normalize is not None else self.do_normalize
lowercase : List[Any] =image_mean if image_mean is not None else self.image_mean
lowercase : Optional[int] =image_std if image_std is not None else self.image_std
lowercase : Any =apply_ocr if apply_ocr is not None else self.apply_ocr
lowercase : Any =ocr_lang if ocr_lang is not None else self.ocr_lang
lowercase : Dict =tesseract_config if tesseract_config is not None else self.tesseract_config
lowercase : str =make_list_of_images(UpperCAmelCase )
if not valid_images(UpperCAmelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
# All transformations expect numpy arrays.
lowercase : Tuple =[to_numpy_array(UpperCAmelCase ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self , '''pytesseract''' )
lowercase : int =[]
lowercase : Tuple =[]
for image in images:
lowercase , lowercase : Dict =apply_tesseract(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
words_batch.append(UpperCAmelCase )
boxes_batch.append(UpperCAmelCase )
if do_resize:
lowercase : int =[self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images]
if do_rescale:
lowercase : Tuple =[self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images]
if do_normalize:
lowercase : str =[self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images]
lowercase : Optional[Any] =[to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
lowercase : Dict =BatchFeature(data={'''pixel_values''': images} , tensor_type=UpperCAmelCase )
if apply_ocr:
lowercase : int =words_batch
lowercase : List[str] =boxes_batch
return data
| 8 | 0 |
'''simple docstring'''
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
SCREAMING_SNAKE_CASE = '\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n'
SCREAMING_SNAKE_CASE = '\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n'
SCREAMING_SNAKE_CASE = '\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: \'auto\' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default \'gpt2-large\' Use one of [\'gpt2\', \'gpt2-medium\', \'gpt2-large\', \'gpt2-xl\'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: "c" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. 
Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric(\'mauve\')\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def A__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/krishnap25/mauve''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/krishnap25/mauve'''] , reference_urls=[
'''https://arxiv.org/abs/2102.01454''',
'''https://github.com/krishnap25/mauve''',
] , )
def A__ ( self : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : List[Any]=None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : str="auto" , UpperCAmelCase : str=-1 , UpperCAmelCase : List[Any]=0.9 , UpperCAmelCase : str=5 , UpperCAmelCase : Tuple=500 , UpperCAmelCase : Optional[int]="gpt2-large" , UpperCAmelCase : Union[str, Any]=-1 , UpperCAmelCase : Union[str, Any]=1024 , UpperCAmelCase : Dict=25 , UpperCAmelCase : Dict=5 , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : Optional[int]=25 , ) -> Tuple:
'''simple docstring'''
lowercase : Dict =compute_mauve(
p_text=UpperCAmelCase , q_text=UpperCAmelCase , p_features=UpperCAmelCase , q_features=UpperCAmelCase , p_tokens=UpperCAmelCase , q_tokens=UpperCAmelCase , num_buckets=UpperCAmelCase , pca_max_data=UpperCAmelCase , kmeans_explained_var=UpperCAmelCase , kmeans_num_redo=UpperCAmelCase , kmeans_max_iter=UpperCAmelCase , featurize_model_name=UpperCAmelCase , device_id=UpperCAmelCase , max_text_length=UpperCAmelCase , divergence_curve_discretization_size=UpperCAmelCase , mauve_scaling_factor=UpperCAmelCase , verbose=UpperCAmelCase , seed=UpperCAmelCase , )
return out
| 718 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Tuple , UpperCAmelCase : List[str] , ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =parent
lowercase : Any =13
lowercase : Any =7
lowercase : Optional[int] =True
lowercase : Optional[int] =True
lowercase : Tuple =False
lowercase : Optional[Any] =True
lowercase : Dict =99
lowercase : Union[str, Any] =32
lowercase : Union[str, Any] =2
lowercase : Union[str, Any] =4
lowercase : List[str] =37
lowercase : str ='''gelu'''
lowercase : Dict =0.1
lowercase : List[Any] =0.1
lowercase : List[str] =512
lowercase : Optional[int] =16
lowercase : Optional[Any] =2
lowercase : List[str] =0.0_2
lowercase : Any =3
lowercase : Optional[Any] =4
lowercase : int =None
def A__ ( self : List[str] ) -> Dict:
'''simple docstring'''
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Any =None
if self.use_input_mask:
lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Union[str, Any] =None
lowercase : Any =None
lowercase : str =None
if self.use_labels:
lowercase : Union[str, Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : List[Any] =ids_tensor([self.batch_size] , self.num_choices )
lowercase : Dict =DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : Any , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =TFDistilBertModel(config=UpperCAmelCase )
lowercase : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : List[str] =model(UpperCAmelCase )
lowercase : str =[input_ids, input_mask]
lowercase : Tuple =model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowercase : List[str] =TFDistilBertForMaskedLM(config=UpperCAmelCase )
lowercase : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : Union[str, Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] ) -> Any:
'''simple docstring'''
lowercase : str =TFDistilBertForQuestionAnswering(config=UpperCAmelCase )
lowercase : int ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self : str , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict ) -> Optional[int]:
'''simple docstring'''
lowercase : Dict =self.num_labels
lowercase : Optional[Any] =TFDistilBertForSequenceClassification(UpperCAmelCase )
lowercase : str ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : Union[str, Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self : int , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Dict ) -> List[str]:
'''simple docstring'''
lowercase : List[Any] =self.num_choices
lowercase : Optional[int] =TFDistilBertForMultipleChoice(UpperCAmelCase )
lowercase : Optional[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : Tuple ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
}
lowercase : Tuple =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] ) -> Dict:
'''simple docstring'''
lowercase : Dict =self.num_labels
lowercase : Tuple =TFDistilBertForTokenClassification(UpperCAmelCase )
lowercase : Optional[Any] ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
lowercase : str =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : List[str] ) -> Dict:
'''simple docstring'''
lowercase : int =self.prepare_config_and_inputs()
((lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase)) : Union[str, Any] =config_and_inputs
lowercase : Tuple ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
UpperCamelCase_ = (
{
'''feature-extraction''': TFDistilBertModel,
'''fill-mask''': TFDistilBertForMaskedLM,
'''question-answering''': TFDistilBertForQuestionAnswering,
'''text-classification''': TFDistilBertForSequenceClassification,
'''token-classification''': TFDistilBertForTokenClassification,
'''zero-shot''': TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCamelCase_ = False
UpperCamelCase_ = False
def A__ ( self : Dict ) -> str:
'''simple docstring'''
lowercase : str =TFDistilBertModelTester(self )
lowercase : int =ConfigTester(self , config_class=UpperCAmelCase , dim=37 )
def A__ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def A__ ( self : Optional[Any] ) -> Tuple:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase )
def A__ ( self : Tuple ) -> Any:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase )
def A__ ( self : Optional[int] ) -> List[Any]:
'''simple docstring'''
lowercase : Any =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase )
def A__ ( self : Any ) -> str:
'''simple docstring'''
lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase )
def A__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase )
def A__ ( self : str ) -> Union[str, Any]:
'''simple docstring'''
lowercase : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase )
@slow
def A__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
lowercase : Union[str, Any] =TFDistilBertModel.from_pretrained(UpperCAmelCase )
self.assertIsNotNone(UpperCAmelCase )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A__ ( self : List[str] ) -> List[Any]:
'''simple docstring'''
lowercase : Optional[Any] =TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
lowercase : Tuple =tf.constant([[0, 1, 2, 3, 4, 5]] )
lowercase : List[Any] =model(UpperCAmelCase )[0]
lowercase : str =[1, 6, 768]
self.assertEqual(output.shape , UpperCAmelCase )
lowercase : Optional[int] =tf.constant(
[
[
[0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
[0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
[0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
| 8 | 0 |
'''simple docstring'''
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from transformers import (
VisualBertConfig,
VisualBertForMultipleChoice,
VisualBertForPreTraining,
VisualBertForQuestionAnswering,
VisualBertForVisualReasoning,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = [
("""bert.bert""", """visual_bert"""),
("""bert.cls""", """cls"""),
("""bert.classifier""", """cls"""),
("""token_type_embeddings_visual""", """visual_token_type_embeddings"""),
("""position_embeddings_visual""", """visual_position_embeddings"""),
("""projection""", """visual_projection"""),
]
SCREAMING_SNAKE_CASE = [
"""nlvr2_coco_pre_trained.th""",
"""nlvr2_fine_tuned.th""",
"""nlvr2_pre_trained.th""",
"""vcr_coco_pre_train.th""",
"""vcr_fine_tune.th""",
"""vcr_pre_train.th""",
"""vqa_coco_pre_trained.th""",
"""vqa_fine_tuned.th""",
"""vqa_pre_trained.th""",
]
def lowercase_ ( __A : str ) -> str:
"""simple docstring"""
lowercase : List[Any] =torch.load(UpperCamelCase__ , map_location='''cpu''' )
return sd
def lowercase_ ( __A : Union[str, Any] , __A : str , __A : Tuple=rename_keys_prefix ) -> Tuple:
"""simple docstring"""
lowercase : Dict =OrderedDict()
lowercase : Dict =torch.arange(config.max_position_embeddings ).expand((1, -1) )
# detector_d = OrderedDict()
for key in d:
if "detector" in key:
# detector_d[key.replace('detector.','')] = d[key]
continue
lowercase : Optional[int] =key
for name_pair in rename_keys_prefix:
lowercase : Union[str, Any] =new_key.replace(name_pair[0] , name_pair[1] )
lowercase : str =d[key]
if key == "bert.cls.predictions.decoder.weight":
# Old bert code didn't have `decoder.bias`, but was added separately
lowercase : Union[str, Any] =new_d['''cls.predictions.bias''']
return new_d
@torch.no_grad()
def lowercase_ ( __A : str , __A : str ) -> List[str]:
"""simple docstring"""
assert (
checkpoint_path.split('''/''' )[-1] in ACCEPTABLE_CHECKPOINTS
), F'The checkpoint provided must be in {ACCEPTABLE_CHECKPOINTS}.'
# Get Config
if "pre" in checkpoint_path:
lowercase : Optional[int] ='''pretraining'''
if "vcr" in checkpoint_path:
lowercase : Union[str, Any] ={'''visual_embedding_dim''': 5_1_2}
elif "vqa_advanced" in checkpoint_path:
lowercase : Tuple ={'''visual_embedding_dim''': 2_0_4_8}
elif "vqa" in checkpoint_path:
lowercase : Tuple ={'''visual_embedding_dim''': 2_0_4_8}
elif "nlvr" in checkpoint_path:
lowercase : str ={'''visual_embedding_dim''': 1_0_2_4}
else:
raise NotImplementedError(F'No implementation found for `{checkpoint_path}`.' )
else:
if "vcr" in checkpoint_path:
lowercase : Any ={'''visual_embedding_dim''': 5_1_2}
lowercase : List[str] ='''multichoice'''
elif "vqa_advanced" in checkpoint_path:
lowercase : Optional[int] ={'''visual_embedding_dim''': 2_0_4_8}
lowercase : List[str] ='''vqa_advanced'''
elif "vqa" in checkpoint_path:
lowercase : List[Any] ={'''visual_embedding_dim''': 2_0_4_8, '''num_labels''': 3_1_2_9}
lowercase : Any ='''vqa'''
elif "nlvr" in checkpoint_path:
lowercase : Optional[int] ={
'''visual_embedding_dim''': 1_0_2_4,
'''num_labels''': 2,
}
lowercase : List[Any] ='''nlvr'''
lowercase : List[str] =VisualBertConfig(**UpperCamelCase__ )
# Load State Dict
lowercase : Any =load_state_dict(UpperCamelCase__ )
lowercase : Union[str, Any] =get_new_dict(UpperCamelCase__ , UpperCamelCase__ )
if model_type == "pretraining":
lowercase : Optional[Any] =VisualBertForPreTraining(UpperCamelCase__ )
elif model_type == "vqa":
lowercase : Optional[Any] =VisualBertForQuestionAnswering(UpperCamelCase__ )
elif model_type == "nlvr":
lowercase : int =VisualBertForVisualReasoning(UpperCamelCase__ )
elif model_type == "multichoice":
lowercase : Dict =VisualBertForMultipleChoice(UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ )
# Save Checkpoints
Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ )
model.save_pretrained(UpperCamelCase__ )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument('orig_checkpoint_path', type=str, help='A path to .th on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', type=str, help='Path to the output PyTorch model.')
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_visual_bert_checkpoint(args.orig_checkpoint_path, args.pytorch_dump_folder_path)
| 719 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {
'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ['RemBertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ['RemBertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'RemBertForCausalLM',
'RemBertForMaskedLM',
'RemBertForMultipleChoice',
'RemBertForQuestionAnswering',
'RemBertForSequenceClassification',
'RemBertForTokenClassification',
'RemBertLayer',
'RemBertModel',
'RemBertPreTrainedModel',
'load_tf_weights_in_rembert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRemBertForCausalLM',
'TFRemBertForMaskedLM',
'TFRemBertForMultipleChoice',
'TFRemBertForQuestionAnswering',
'TFRemBertForSequenceClassification',
'TFRemBertForTokenClassification',
'TFRemBertLayer',
'TFRemBertModel',
'TFRemBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 8 | 0 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class lowercase__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : Tuple=7 , UpperCAmelCase : Optional[Any]=3 , UpperCAmelCase : List[str]=30 , UpperCAmelCase : List[str]=400 , UpperCAmelCase : List[str]=True , UpperCAmelCase : Any=None , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Any=[0.5, 0.5, 0.5] , UpperCAmelCase : int=[0.5, 0.5, 0.5] , UpperCAmelCase : Dict=True , UpperCAmelCase : Optional[Any]=1 / 255 , UpperCAmelCase : Optional[int]=True , ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1333}
lowercase : List[Any] =parent
lowercase : Tuple =batch_size
lowercase : Tuple =num_channels
lowercase : List[str] =min_resolution
lowercase : Optional[int] =max_resolution
lowercase : Any =do_resize
lowercase : Tuple =size
lowercase : Optional[int] =do_normalize
lowercase : Any =image_mean
lowercase : Any =image_std
lowercase : int =do_rescale
lowercase : Tuple =rescale_factor
lowercase : int =do_pad
def A__ ( self : str ) -> Any:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def A__ ( self : List[str] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any]=False ) -> Optional[Any]:
'''simple docstring'''
if not batched:
lowercase : Optional[Any] =image_inputs[0]
if isinstance(UpperCAmelCase , Image.Image ):
lowercase , lowercase : Tuple =image.size
else:
lowercase , lowercase : int =image.shape[1], image.shape[2]
if w < h:
lowercase : Union[str, Any] =int(self.size['''shortest_edge'''] * h / w )
lowercase : int =self.size['''shortest_edge''']
elif w > h:
lowercase : Optional[Any] =self.size['''shortest_edge''']
lowercase : List[str] =int(self.size['''shortest_edge'''] * w / h )
else:
lowercase : List[str] =self.size['''shortest_edge''']
lowercase : str =self.size['''shortest_edge''']
else:
lowercase : Tuple =[]
for image in image_inputs:
lowercase , lowercase : Tuple =self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowercase : List[str] =max(UpperCAmelCase , key=lambda UpperCAmelCase : item[0] )[0]
lowercase : Dict =max(UpperCAmelCase , key=lambda UpperCAmelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase__ ( __A , unittest.TestCase ):
    """Test suite for ``DeformableDetrImageProcessor``: attribute exposure,
    ``from_dict`` overrides, PIL/numpy/torch input shapes, and slow COCO
    detection / panoptic annotation-encoding checks against reference tensors.
    NOTE(review): identifiers in this file were mechanically renamed — many
    results are bound to a throwaway local ``lowercase`` and several call
    arguments read ``UpperCAmelCase``; the intended names are inferable from
    context but should be confirmed against the original transformers tests.
    The base class ``__A`` is presumably an image-processing test mixin.
    """
    # Processor under test; None when the vision extra is unavailable.
    UpperCamelCase_ = DeformableDetrImageProcessor if is_vision_available() else None
    def A__ ( self : Tuple ) -> str:
        """Set-up hook: build the shared tester fixture.
        NOTE(review): the tester is bound to a discarded local; later methods
        read ``self.image_processor_tester`` — presumably assigned here.
        """
        lowercase : Optional[int] =DeformableDetrImageProcessingTester(self )
    @property
    def A__ ( self : str ) -> int:
        """Image-processor kwargs built from the tester's settings."""
        return self.image_processor_tester.prepare_image_processor_dict()
    def A__ ( self : str ) -> Optional[int]:
        """The processor exposes every expected configuration attribute.
        NOTE(review): the hasattr target should be the instance built above.
        """
        lowercase : Any =self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(UpperCAmelCase , '''image_mean''' ) )
        self.assertTrue(hasattr(UpperCAmelCase , '''image_std''' ) )
        self.assertTrue(hasattr(UpperCAmelCase , '''do_normalize''' ) )
        self.assertTrue(hasattr(UpperCAmelCase , '''do_resize''' ) )
        self.assertTrue(hasattr(UpperCAmelCase , '''do_rescale''' ) )
        self.assertTrue(hasattr(UpperCAmelCase , '''do_pad''' ) )
        self.assertTrue(hasattr(UpperCAmelCase , '''size''' ) )
    def A__ ( self : List[Any] ) -> Optional[int]:
        """``from_dict`` honours defaults and explicit size/pad overrides.
        NOTE(review): the ``UpperCAmelCase`` expectations were presumably the
        literals ``True``/``False`` before renaming — confirm.
        """
        lowercase : int =self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1333} )
        self.assertEqual(image_processor.do_pad , UpperCAmelCase )
        lowercase : Optional[int] =self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCAmelCase )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42, '''longest_edge''': 84} )
        self.assertEqual(image_processor.do_pad , UpperCAmelCase )
    def A__ ( self : List[Any] ) -> Any:
        """Intentionally-skipped base-class hook."""
        pass
    def A__ ( self : Any ) -> Any:
        """PIL inputs: single image and batch produce the expected shapes."""
        lowercase : Union[str, Any] =self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        lowercase : str =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase , Image.Image )
        # Test not batched input
        lowercase : Dict =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        lowercase , lowercase : Tuple =self.image_processor_tester.get_expected_values(UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        lowercase , lowercase : int =self.image_processor_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase )
        lowercase : Tuple =image_processing(UpperCAmelCase , return_tensors='''pt''' ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def A__ ( self : str ) -> int:
        """Numpy inputs: single image and batch produce the expected shapes."""
        lowercase : str =self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        lowercase : str =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , numpify=UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase , np.ndarray )
        # Test not batched input
        lowercase : List[Any] =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        lowercase , lowercase : Optional[Any] =self.image_processor_tester.get_expected_values(UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        lowercase : int =image_processing(UpperCAmelCase , return_tensors='''pt''' ).pixel_values
        lowercase , lowercase : Tuple =self.image_processor_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def A__ ( self : Any ) -> Optional[int]:
        """Torch inputs: single image and batch produce the expected shapes."""
        lowercase : Dict =self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        lowercase : Optional[int] =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase , torchify=UpperCAmelCase )
        for image in image_inputs:
            self.assertIsInstance(UpperCAmelCase , torch.Tensor )
        # Test not batched input
        lowercase : List[Any] =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
        lowercase , lowercase : int =self.image_processor_tester.get_expected_values(UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        lowercase : List[Any] =image_processing(UpperCAmelCase , return_tensors='''pt''' ).pixel_values
        lowercase , lowercase : Tuple =self.image_processor_tester.get_expected_values(UpperCAmelCase , batched=UpperCAmelCase )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    @slow
    def A__ ( self : Union[str, Any] ) -> str:
        """COCO detection annotations encode to the reference tensors."""
        lowercase : List[str] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f:
            lowercase : Any =json.loads(f.read() )
        lowercase : Any ={'''image_id''': 3_9769, '''annotations''': target}
        # encode them
        lowercase : Union[str, Any] =DeformableDetrImageProcessor()
        lowercase : List[Any] =image_processing(images=UpperCAmelCase , annotations=UpperCAmelCase , return_tensors='''pt''' )
        # verify pixel values
        lowercase : Tuple =torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase )
        lowercase : Dict =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase , atol=1e-4 ) )
        # verify area
        lowercase : Optional[Any] =torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase ) )
        # verify boxes
        lowercase : Optional[int] =torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase )
        lowercase : Tuple =torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase , atol=1e-3 ) )
        # verify image_id
        lowercase : List[str] =torch.tensor([3_9769] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase ) )
        # verify is_crowd
        lowercase : int =torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase ) )
        # verify class_labels
        lowercase : Tuple =torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase ) )
        # verify orig_size
        lowercase : Optional[Any] =torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase ) )
        # verify size
        lowercase : Union[str, Any] =torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase ) )
    @slow
    def A__ ( self : Union[str, Any] ) -> Tuple:
        """COCO panoptic annotations encode to the reference tensors."""
        lowercase : List[str] =Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f:
            lowercase : Tuple =json.loads(f.read() )
        lowercase : List[str] ={'''file_name''': '''000000039769.png''', '''image_id''': 3_9769, '''segments_info''': target}
        lowercase : List[str] =pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' )
        # encode them
        lowercase : Optional[Any] =DeformableDetrImageProcessor(format='''coco_panoptic''' )
        lowercase : Dict =image_processing(images=UpperCAmelCase , annotations=UpperCAmelCase , masks_path=UpperCAmelCase , return_tensors='''pt''' )
        # verify pixel values
        lowercase : Any =torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding['''pixel_values'''].shape , UpperCAmelCase )
        lowercase : List[str] =torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , UpperCAmelCase , atol=1e-4 ) )
        # verify area
        lowercase : int =torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , UpperCAmelCase ) )
        # verify boxes
        lowercase : Optional[Any] =torch.Size([6, 4] )
        self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , UpperCAmelCase )
        lowercase : List[str] =torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , UpperCAmelCase , atol=1e-3 ) )
        # verify image_id
        lowercase : Optional[int] =torch.tensor([3_9769] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , UpperCAmelCase ) )
        # verify is_crowd
        lowercase : Optional[int] =torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , UpperCAmelCase ) )
        # verify class_labels
        lowercase : List[str] =torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , UpperCAmelCase ) )
        # verify masks
        lowercase : List[Any] =82_2873
        self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , UpperCAmelCase )
        # verify orig_size
        lowercase : int =torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , UpperCAmelCase ) )
        # verify size
        lowercase : Optional[int] =torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , UpperCAmelCase ) )
| 720 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
SCREAMING_SNAKE_CASE = parse(importlib.metadata.version('torch'))
def lowercase_(library_or_version: Union[str, Version], operation: str, requirement_version: str) -> bool:
    """Compare an installed library's version (or a parsed ``Version``) against a requirement.

    Args:
        library_or_version: a package name whose installed version is looked up
            via ``importlib.metadata``, or an already-parsed ``Version``.
        operation: a key of ``STR_OPERATION_TO_FUNC`` (e.g. ``">="``).
        requirement_version: version string to compare against.

    Raises:
        ValueError: if ``operation`` is not a supported comparison.

    Fixes over the original: the three parameters shared one name (a
    SyntaxError) and the version lookup reused the wrong argument.
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(F'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}' )
    # Map the textual operator to its callable (operator.ge, operator.lt, ...).
    compare = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version, str):
        library_or_version = parse(importlib.metadata.version(library_or_version))
    return compare(library_or_version, parse(requirement_version))


# Backward-compatible alias: the helper below calls this as `compare_versions`.
compare_versions = lowercase_
def lowercase_(operation: str, version: str) -> bool:
    """Compare the installed torch version against ``version``.

    Thin wrapper over the three-argument comparison above.  Fixes over the
    original: the two parameters shared one name (a SyntaxError) and the same
    argument was forwarded three times instead of the cached torch version.
    """
    # SCREAMING_SNAKE_CASE holds parse(importlib.metadata.version("torch")).
    return compare_versions(SCREAMING_SNAKE_CASE, operation, version)
| 8 | 0 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE = logging.get_logger()
def lowercase_(hidden_sizes: int, name: str, config: LevitConfig, save_directory: Path, push_to_hub: bool = True) -> None:
    """Convert one timm LeViT checkpoint into a Hugging Face model and save it.

    Args:
        hidden_sizes: first-stage hidden size, used to pick the timm variant.
        name: checkpoint name, e.g. ``"levit-128S"``.
        config: target HF configuration for the converted model.
        save_directory: output root directory.
        push_to_hub: when True, write the converted model/processor to disk.

    Fixes over the original: all five parameters shared one name (a
    SyntaxError) and every intermediate result was bound to a throwaway
    local; names are restored from their later reads.
    """
    print(F'Converting {name}...' )
    with torch.no_grad():
        # Pick the matching pretrained timm model.
        if hidden_sizes == 128:
            if name[-1] == "S":
                from_model = timm.create_model("levit_128s", pretrained=True)
            else:
                from_model = timm.create_model("levit_128", pretrained=True)
        if hidden_sizes == 192:
            from_model = timm.create_model("levit_192", pretrained=True)
        if hidden_sizes == 256:
            from_model = timm.create_model("levit_256", pretrained=True)
        if hidden_sizes == 384:
            from_model = timm.create_model("levit_384", pretrained=True)
        from_model.eval()
        our_model = LevitForImageClassificationWithTeacher(config).eval()
        huggingface_weights = OrderedDict()
        weights = from_model.state_dict()
        og_keys = list(from_model.state_dict().keys())
        new_keys = list(our_model.state_dict().keys())
        print(len(og_keys), len(new_keys))
        # assumes the two state dicts are key-aligned by position — TODO confirm
        for i in range(len(og_keys)):
            huggingface_weights[new_keys[i]] = weights[og_keys[i]]
        our_model.load_state_dict(huggingface_weights)
        # Sanity check: both models must agree on a random input.
        x = torch.randn((2, 3, 224, 224))
        from_logits = from_model(x)
        our_logits = our_model(x).logits
        assert torch.allclose(from_logits, our_logits), "The model logits don't match the original one."
    checkpoint_name = name
    print(checkpoint_name)
    if push_to_hub:
        our_model.save_pretrained(save_directory / checkpoint_name)
        image_processor = LevitImageProcessor()
        image_processor.save_pretrained(save_directory / checkpoint_name)
        print(F'Pushed {checkpoint_name}' )


# Backward-compatible alias: the batch driver calls this as
# `convert_weight_and_push`.
convert_weight_and_push = lowercase_
def lowercase_(save_directory: Path, model_name: Optional[str] = None, push_to_hub: bool = True) -> Union[str, Any]:
    """Convert one named LeViT checkpoint, or every known one when no name is given.

    Args:
        save_directory: output root directory for converted checkpoints.
        model_name: optional checkpoint name (e.g. ``"levit-192"``).
        push_to_hub: forwarded to the per-checkpoint converter.

    Returns:
        The (last) config used and the expected classification-logits shape.

    Fixes over the original: duplicate parameter names (a SyntaxError), the
    id->label comprehension cast the wrong variable, and ``config`` was
    unbound in the single-model branch.
    """
    filename = "imagenet-1k-id2label.json"
    num_labels = 1000
    expected_shape = (1, num_labels)
    repo_id = "huggingface/label-files"
    # Build ImageNet id<->label maps from the hub dataset file.
    idalabel = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    # Partial config pre-filled with ImageNet label metadata.
    # NOTE(review): kwarg names `idalabel`/`labelaid` look like garbled
    # `id2label`/`label2id` — LevitConfig expects the latter.
    ImageNetPreTrainedConfig = partial(LevitConfig, num_labels=num_labels, id2label=idalabel, label2id=labelaid)
    names_to_hidden_sizes = {
        "levit-128S": 128,
        "levit-128": 128,
        "levit-192": 192,
        "levit-256": 256,
        "levit-384": 384,
    }
    names_to_config = {
        "levit-128S": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 6, 8], depths=[2, 3, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-128": ImageNetPreTrainedConfig(
            hidden_sizes=[128, 256, 384], num_attention_heads=[4, 8, 12], depths=[4, 4, 4], key_dim=[16, 16, 16], drop_path_rate=0,
        ),
        "levit-192": ImageNetPreTrainedConfig(
            hidden_sizes=[192, 288, 384], num_attention_heads=[3, 5, 6], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-256": ImageNetPreTrainedConfig(
            hidden_sizes=[256, 384, 512], num_attention_heads=[4, 6, 8], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0,
        ),
        "levit-384": ImageNetPreTrainedConfig(
            hidden_sizes=[384, 512, 768], num_attention_heads=[6, 9, 12], depths=[4, 4, 4], key_dim=[32, 32, 32], drop_path_rate=0.1,
        ),
    }
    if model_name:
        config = names_to_config[model_name]
        convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    else:
        for model_name, config in names_to_config.items():
            convert_weight_and_push(names_to_hidden_sizes[model_name], model_name, config, save_directory, push_to_hub)
    return config, expected_shape


# Backward-compatible alias: the __main__ block calls this as
# `convert_weights_and_push`.
convert_weights_and_push = lowercase_
if __name__ == "__main__":
    # Command-line driver: parse arguments and run the conversion.
    # Fixes over the original: the parser/args/output-path locals were all
    # bound to one SCREAMING_SNAKE_CASE name, so every later reference
    # (`parser`, `args`, `pytorch_dump_folder_path`) raised NameError.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default=None,
        type=str,
        help='The name of the model you wish to convert, it must be one of the supported Levit* architecture,',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path',
        default='levit-dump-folder/',
        type=Path,
        required=False,
        help='Path to the output PyTorch model directory.',
    )
    parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub')
    parser.add_argument(
        '--no-push_to_hub',
        dest='push_to_hub',
        action='store_false',
        help='Do not push model and image processor to the hub',
    )
    args = parser.parse_args()
    pytorch_dump_folder_path = args.pytorch_dump_folder_path
    # Ensure the output directory exists before the conversion writes into it.
    pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
    convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 721 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
SCREAMING_SNAKE_CASE = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def lowercase_(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    """Yield ``(job_title, company_name)`` pairs scraped from Indeed.

    Args:
        location: city appended to the search URL (module constant
            ``SCREAMING_SNAKE_CASE``).

    Fixes over the original: results were bound to a throwaway local while the
    body read ``soup``/``job_title``/``company_name``, and the URL constant was
    referenced under a name that does not exist.
    """
    soup = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE + location).content, "html.parser")
    # Each organic posting is one result block carrying title and company.
    for job in soup.find_all("div", attrs={"data-tn-component": "organicJob"}):
        job_title = job.find("a", attrs={"data-tn-element": "jobTitle"}).text.strip()
        company_name = job.find("span", {"class": "company"}).text.strip()
        yield job_title, company_name


# Backward-compatible alias: the __main__ block iterates `fetch_jobs(...)`.
fetch_jobs = lowercase_
# Smoke-test the scraper: print the first page of Bangalore results.
# NOTE(review): `fetch_jobs` is not defined in this module — the generator
# above is named `lowercase_`; confirm the intended name.
if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs('Bangalore'), 1):
        print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 8 | 0 |
'''simple docstring'''
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
SCREAMING_SNAKE_CASE = Mapping[str, np.ndarray]
SCREAMING_SNAKE_CASE = Mapping[str, Any] # Is a nested dict.
SCREAMING_SNAKE_CASE = 0.01
@dataclasses.dataclass(frozen=True)
class UpperCAmelCase_:
    """Immutable protein structure representation.

    Fixes over the original: ``frozen=__A`` referenced an undefined name and
    every field had collapsed to ``UpperCamelCase_ = 42``; the field names and
    order are restored from the accessors used by this module's helper
    functions (``prot.atom_positions``, ``prot.aatype``, ...).
    """

    # Cartesian atom coordinates in angstroms.
    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None


# Backward-compatible alias: every helper in this module constructs and
# annotates with `Protein`.
Protein = UpperCAmelCase_
def lowercase_ ( __A : str ) -> Protein:
    """Parse a ProteinNet-format record into a ``Protein``.

    NOTE(review): this body was mechanically renamed — results are bound to a
    throwaway local ``lowercase`` and several call arguments read ``__A``, so
    as written it raises NameError (e.g. ``tags`` is never bound before its
    use below).  The structure matches openfold's ``from_proteinnet_string``;
    confirm the intended identifiers against that source.
    """
    lowercase : int =R'''(\[[A-Z]+\]\n)'''
    # Split the record into alternating [TAG] headers and their payload lines.
    lowercase : List[str] =[tag.strip() for tag in re.split(__A , __A ) if len(__A ) > 0]
    lowercase : Iterator[Tuple[str, List[str]]] =zip(tags[0::2] , [l.split('''\n''' ) for l in tags[1::2]] )
    lowercase : List[str] =["N", "CA", "C"]
    lowercase : int =None
    lowercase : List[Any] =None
    lowercase : Dict =None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            # Primary sequence: unknown symbols become 'X', then map to ints.
            lowercase : List[Any] =g[1][0].strip()
            for i in range(len(__A ) ):
                if seq[i] not in residue_constants.restypes:
                    lowercase : List[Any] ='''X''' # FIXME: strings are immutable
            lowercase : str =np.array(
                [residue_constants.restype_order.get(__A , residue_constants.restype_num ) for res_symbol in seq] )
        elif "[TERTIARY]" == g[0]:
            # Tertiary structure: three coordinate rows (x, y, z), converted
            # from picometres to angstroms below.
            lowercase : List[List[float]] =[]
            for axis in range(3 ):
                tertiary.append(list(map(__A , g[1][axis].split() ) ) )
            lowercase : int =np.array(__A )
            lowercase : List[str] =np.zeros((len(tertiary[0] ) // 3, residue_constants.atom_type_num, 3) ).astype(np.floataa )
            for i, atom in enumerate(__A ):
                lowercase : Optional[int] =np.transpose(tertiary_np[:, i::3] )
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            # Mask: '+' marks resolved residues, '-' unresolved ones.
            lowercase : Any =np.array(list(map({'''-''': 0, '''+''': 1}.get , g[1][0].strip() ) ) )
            lowercase : str =np.zeros(
                (
                    len(__A ),
                    residue_constants.atom_type_num,
                ) ).astype(np.floataa )
            for i, atom in enumerate(__A ):
                lowercase : List[str] =1
            atom_mask *= mask[..., None]
    assert aatype is not None
    return Protein(
        atom_positions=__A , atom_mask=__A , aatype=__A , residue_index=np.arange(len(__A ) ) , b_factors=__A , )
def lowercase_ ( __A : Protein , __A : int = 0 ) -> List[str]:
"""simple docstring"""
lowercase : List[str] =[]
lowercase : List[str] =prot.remark
if remark is not None:
pdb_headers.append(F'REMARK {remark}' )
lowercase : Union[str, Any] =prot.parents
lowercase : Optional[int] =prot.parents_chain_index
if parents is not None and parents_chain_index is not None:
lowercase : List[str] =[p for i, p in zip(__A , __A ) if i == chain_id]
if parents is None or len(__A ) == 0:
lowercase : Optional[Any] =['''N/A''']
pdb_headers.append(F'PARENT {" ".join(__A )}' )
return pdb_headers
def lowercase_(prot: "Protein", pdb_str: str) -> str:
    """Insert REMARK/PARENT headers into an existing PDB string, per chain.

    Args:
        prot: object exposing ``remark``, ``parents`` and
            ``parents_chain_index``.
        pdb_str: a rendered PDB string whose existing PARENT/REMARK lines are
            replaced.

    Fixes over the original: duplicate parameter names (a SyntaxError) and
    garbled call arguments; names restored from the locals the body reads.
    """
    out_pdb_lines = []
    lines = pdb_str.split("\n")
    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(F'REMARK {remark}' )
    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            # Group parents by their chain index, filling gaps with "N/A".
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)
            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        # One PARENT line listing a chain's template parents.
        return F'PARENT {" ".join(p)}'

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))
    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        # A TER not immediately followed by END starts a new chain.
        # NOTE(review): `lines[i + 1]` raises IndexError if TER is the final
        # line — behavior kept from the original.
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]
            out_pdb_lines.append(make_parent_line(chain_parents))
    return "\n".join(out_pdb_lines)
def lowercase_(prot: "Protein") -> str:
    """Render a protein as a PDB-format string.

    Fixes over the original: the parameter was named ``__A`` while the body
    read ``prot`` (NameError), the residue-name helper ignored its argument,
    ``np.intaa`` is not a numpy dtype (restored to ``np.int32``), and several
    loop/call targets were garbled.  The columnar f-string spacing follows
    the PDB ATOM/TER record layout.
    """
    restypes = residue_constants.restypes + ["X"]

    def res_atoa(r: int) -> str:
        # Three-letter residue name; 'UNK' for anything out of range.
        return residue_constants.restype_atoa.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types
    pdb_lines = []
    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index
    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")
    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)
    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_a = res_atoa(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue
            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else F' {atom_name}'
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""
            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]
            # PDB is a columnar format, every space matters here!
            atom_line = (
                F'{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}'
                F'{res_name_a:>3} {chain_tag:>1}'
                F'{residue_index[i]:>4}{insertion_code:>1}   '
                F'{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}'
                F'{occupancy:>6.2f}{b_factor:>6.2f}          '
                F'{element:>2}{charge:>2}'
            )
            pdb_lines.append(atom_line)
            atom_index += 1
        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]
        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                F'{chain_end:<6}{atom_index:>5}      {res_atoa(aatype[i] ):>3} {chain_tag:>1}{residue_index[i]:>4}'
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1
            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))
    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def lowercase_(prot: "Protein") -> np.ndarray:
    """Return the ideal atom mask implied by each residue's amino-acid type.

    Fixes over the original: the parameter was named ``__A`` while the body
    read ``prot`` (NameError).
    """
    # STANDARD_ATOM_MASK maps each residue type to its canonical atom set.
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]
def lowercase_(
    features: "FeatureDict",
    result: "ModelOutput",
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> "Protein":
    """Assemble a ``Protein`` from model input features and model outputs.

    ``residue_index`` is shifted to 1-based PDB numbering; missing b-factors
    default to zeros shaped like the final atom mask.  Fixes over the
    original: seven parameters shared one name (a SyntaxError); names are
    restored from the keyword arguments passed to ``Protein``.
    """
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
| 700 |
"""Lazy import structure for the M2M100 model family."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

SCREAMING_SNAKE_CASE = {
    'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
    'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fix: the original REASSIGNED the import-structure dict to this list,
    # discarding the config/tokenizer entries; register it under its key.
    SCREAMING_SNAKE_CASE['modeling_m2m_100'] = [
        'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
        'M2M100ForConditionalGeneration',
        'M2M100Model',
        'M2M100PreTrainedModel',
    ]
if TYPE_CHECKING:
    # NOTE(review): module paths/names restored to match the declared import
    # structure above (the original referenced `.configuration_mam_aaa`).
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
    import sys

    # Fix: the original passed undefined `_import_structure` here.
    SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], SCREAMING_SNAKE_CASE, module_spec=__spec__)
| 8 | 0 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptomath
from . import rabin_miller
SCREAMING_SNAKE_CASE = 3
def lowercase_(p_val: int) -> int:
    """Find a (probable) primitive root modulo the prime ``p_val``.

    Candidates ``g`` are rejected when ``g^2 ≡ 1`` or ``g^p ≡ 1 (mod p)``,
    the classic ElGamal key-generation check.  Fixes over the original: the
    candidate was bound to a throwaway local, so the modular checks tested
    ``p`` against itself instead of the candidate.
    """
    print('''Generating primitive root of p''' )
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


# Backward-compatible alias: key generation below calls `primitive_root`.
primitive_root = lowercase_
def lowercase_(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    """Generate an ElGamal (public, private) key pair of ``key_size`` bits.

    Returns:
        ``((key_size, e_1, e_2, p), (key_size, d))`` where ``p`` is a large
        prime, ``e_1`` a primitive root mod ``p``, ``d`` the private
        exponent, and ``e_2 = inverse(e_1^d mod p)``.

    Fixes over the original: intermediate values were bound to throwaway
    locals (leaving ``d`` unbound and one key element duplicated) and the
    private exponent was drawn from ``range(3, key_size)`` instead of
    ``range(3, p)``.
    """
    print('''Generating prime p...''' )
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)
    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


# Backward-compatible alias: file writing below calls `generate_key`.
generate_key = lowercase_
def lowercase_(name: str, key_size: int) -> None:
    """Write ``{name}_pubkey.txt`` / ``{name}_privkey.txt`` key files.

    Refuses to overwrite existing files and exits instead.  Fixes over the
    original: the two parameters shared one name (a SyntaxError) and the
    generated key pair was never unpacked into ``public_key``/``private_key``.
    """
    if os.path.exists(F'{name}_pubkey.txt' ) or os.path.exists(F'{name}_privkey.txt' ):
        print('''\nWARNING:''' )
        print(
            F'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            '''Use a different name or delete these files and re-run this program.''' )
        sys.exit()
    public_key, private_key = generate_key(key_size)
    print(F'\nWriting public key to file {name}_pubkey.txt...' )
    with open(F'{name}_pubkey.txt' , '''w''' ) as fo:
        fo.write(F'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}' )
    print(F'Writing private key to file {name}_privkey.txt...' )
    with open(F'{name}_privkey.txt' , '''w''' ) as fo:
        fo.write(F'{private_key[0]},{private_key[1]}' )


# Backward-compatible alias: the driver below calls `make_key_files`.
make_key_files = lowercase_
def lowercase_() -> None:
    """Drive ElGamal key-file generation with the default name and key size."""
    print('''Making key files...''' )
    make_key_files('''elgamal''' , 2_0_4_8 )
    print('''Key files generation successful''' )


# Backward-compatible alias: the __main__ guard below calls `main()`,
# a name this module never defined.
main = lowercase_
# Script entry point: generate the ElGamal key files when run directly.
# NOTE(review): `main` is not defined in this module — the driver above is
# named `lowercase_`; confirm the intended name.
if __name__ == "__main__":
    main()
| 701 |
"""Lazy import structure for the MEGA model family."""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

SCREAMING_SNAKE_CASE = {
    'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Fix: the original REASSIGNED the import-structure dict to this list,
    # discarding the configuration entry; register it under its key.
    SCREAMING_SNAKE_CASE['modeling_mega'] = [
        'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MegaForCausalLM',
        'MegaForMaskedLM',
        'MegaForMultipleChoice',
        'MegaForQuestionAnswering',
        'MegaForSequenceClassification',
        'MegaForTokenClassification',
        'MegaModel',
        'MegaPreTrainedModel',
    ]
if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # Fix: the original passed undefined `_import_structure` here.
    SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], SCREAMING_SNAKE_CASE, module_spec=__spec__)
| 8 | 0 |
'''simple docstring'''
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'huggingface/informer-tourism-monthly': (
'https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json'
),
# See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    """
    Configuration for an Informer time-series forecasting model.

    Stores prediction/context lengths, the feature layout of the input
    (static/dynamic, real/categorical) and the encoder/decoder transformer
    hyper-parameters, plus the Informer-specific ProbSparse attention options
    (`attention_type`, `sampling_factor`, `distil`).
    """

    # `model_type` / `attribute_map` are the identifiers the PretrainedConfig
    # machinery reads; the previous version assigned both to one reused name,
    # clobbering the first, and its `__init__` repeated a single parameter
    # name for every argument (a SyntaxError).
    model_type = 'informer'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }

    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = 'student_t',
        loss: str = 'nll',
        input_size: int = 1,
        lags_sequence: List[int] = None,
        scaling: Optional[Union[str, bool]] = 'mean',
        num_dynamic_real_features: int = 0,
        num_static_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_time_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        is_encoder_decoder: bool = True,
        activation_function: str = 'gelu',
        dropout: float = 0.05,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        # Informer-specific arguments
        attention_type: str = 'prob',
        sampling_factor: int = 5,
        distil: bool = True,
        **kwargs,
    ) -> None:
        # time-series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality (one entry per static categorical feature)
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`' )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension (defaults follow the usual (c + 1) // 2 rule, capped at 50)
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Informer
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        # Extra feature channels appended to each time step; referenced by
        # `__init__` above (the previous version named this property `A__`,
        # so `self._number_of_features` raised AttributeError).
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 702 |
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
# Module-level logger (the original reused one identifier for every constant
# below, so each assignment clobbered the previous one).
logger = logging.get_logger(__name__)

# Canonical constant names: the tokenizer class in this module references
# VOCAB_FILES_NAMES, PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_INIT_CONFIGURATION
# and PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, so they must exist under these
# exact identifiers.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
    },
    'merges_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'allegro/herbert-base-cased': 514}
PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    """
    "Fast" BPE tokenizer for HerBERT, backed by the HuggingFace `tokenizers`
    library.

    The previous version gave every method the same name (so only the last
    survived) and repeated a single parameter name per signature, which is a
    SyntaxError; the upstream method/parameter names are restored here.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token='<s>',
        unk_token='<unk>',
        pad_token='<pad>',
        mask_token='<mask>',
        sep_token='</s>',
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add special tokens: `<s> A </s>` or `<s> A </s> B </s>`."""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token type ids: 0 for the first sequence (+specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the underlying tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 8 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def default_matrix_multiplication(a: list, b: list) -> list:
    """Multiply two 2x2 matrices directly (base case of Strassen).

    Raises Exception if either matrix is not 2x2.  The previous version
    declared both parameters with the same name (a SyntaxError) and then
    referenced the undefined names `a` and `b`.
    """
    if len(a) != 2 or len(a[0]) != 2 or len(b) != 2 or len(b[0]) != 2:
        raise Exception('''Matrices are not 2x2''')
    new_matrix = [
        [a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
        [a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
    ]
    return new_matrix
def matrix_addition(matrix_a: list, matrix_b: list) -> list:
    """Element-wise sum of two equally shaped matrices.

    (Previous version repeated one parameter name — a SyntaxError — while the
    body used `matrix_a`/`matrix_b`.)
    """
    return [
        [matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def matrix_subtraction(matrix_a: list, matrix_b: list) -> list:
    """Element-wise difference (matrix_a - matrix_b) of two equally shaped matrices.

    (Previous version repeated one parameter name — a SyntaxError — while the
    body used `matrix_a`/`matrix_b`.)
    """
    return [
        [matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row]))]
        for row in range(len(matrix_a))
    ]
def split_matrix(a: list) -> tuple[list, list, list, list]:
    """Split a square even-sized matrix into four equal quadrants.

    Returns (top_left, top_right, bot_left, bot_right).
    Raises Exception for odd-sized matrices.  (Previous version mixed the
    parameter name with the undefined name `a`.)
    """
    if len(a) % 2 != 0 or len(a[0]) % 2 != 0:
        raise Exception('''Odd matrices are not supported!''')
    matrix_length = len(a)
    mid = matrix_length // 2

    top_right = [[a[i][j] for j in range(mid, matrix_length)] for i in range(mid)]
    bot_right = [
        [a[i][j] for j in range(mid, matrix_length)] for i in range(mid, matrix_length)
    ]
    top_left = [[a[i][j] for j in range(mid)] for i in range(mid)]
    bot_left = [[a[i][j] for j in range(mid)] for i in range(mid, matrix_length)]
    return top_left, top_right, bot_left, bot_right
def matrix_dimensions(matrix: list) -> tuple[int, int]:
    """Return (rows, cols) of *matrix*.

    (Previous version's body referenced the undefined name `matrix`.)
    """
    return len(matrix), len(matrix[0])
def print_matrix(matrix: list) -> None:
    """Print the matrix, one row per line.

    (Previous version converted the *whole* matrix to a string for every row,
    printing it len(matrix) times.)
    """
    print('''\n'''.join(str(line) for line in matrix))
def actual_strassen(matrix_a: list, matrix_b: list) -> list:
    """Recursively multiply two square power-of-two matrices via Strassen's
    seven-product scheme.

    (Previous version repeated one parameter name — a SyntaxError — and
    collapsed every sub-matrix reference into that single name.)
    """
    # Base case: fall back to the direct 2x2 product.
    if matrix_dimensions(matrix_a) == (2, 2):
        return default_matrix_multiplication(matrix_a, matrix_b)

    a, b, c, d = split_matrix(matrix_a)
    e, f, g, h = split_matrix(matrix_b)

    # Strassen's seven recursive products.
    t1 = actual_strassen(a, matrix_subtraction(f, h))
    t2 = actual_strassen(matrix_addition(a, b), h)
    t3 = actual_strassen(matrix_addition(c, d), e)
    t4 = actual_strassen(d, matrix_subtraction(g, e))
    t5 = actual_strassen(matrix_addition(a, d), matrix_addition(e, h))
    t6 = actual_strassen(matrix_subtraction(b, d), matrix_addition(g, h))
    t7 = actual_strassen(matrix_subtraction(a, c), matrix_addition(e, f))

    # Combine products into the four result quadrants.
    top_left = matrix_addition(matrix_subtraction(matrix_addition(t5, t4), t2), t6)
    top_right = matrix_addition(t1, t2)
    bot_left = matrix_addition(t3, t4)
    bot_right = matrix_subtraction(matrix_subtraction(matrix_addition(t1, t5), t3), t7)

    # construct the new matrix from our 4 quadrants
    new_matrix = []
    for i in range(len(top_right)):
        new_matrix.append(top_left[i] + top_right[i])
    for i in range(len(bot_right)):
        new_matrix.append(bot_left[i] + bot_right[i])
    return new_matrix
def strassen(matrix1: list, matrix2: list) -> list:
    """Multiply two matrices of arbitrary compatible dimensions with
    Strassen's algorithm, zero-padding both up to the next power of two and
    stripping the padding from the result.

    (Previous version repeated one parameter name — a SyntaxError — and
    referenced undefined names `matrixa` / `dimensiona`.)
    """
    if matrix_dimensions(matrix1)[1] != matrix_dimensions(matrix2)[0]:
        msg = (
            '''Unable to multiply these matrices, please check the dimensions.\n'''
            F'Matrix A: {matrix1}\n'
            F'Matrix B: {matrix2}'
        )
        raise Exception(msg)
    dimension1 = matrix_dimensions(matrix1)
    dimension2 = matrix_dimensions(matrix2)

    # NOTE(review): upstream quirk — transposed-shape inputs are returned
    # unmultiplied as a pair; preserved as-is.
    if dimension1[0] == dimension2[1] and dimension1[1] == dimension2[0]:
        return [matrix1, matrix2]

    maximum = max(*dimension1, *dimension2)
    maxim = int(math.pow(2, math.ceil(math.log2(maximum))))
    new_matrix1 = matrix1
    new_matrix2 = matrix2

    # Adding zeros to the matrices so that the arrays dimensions are the same and also
    # power of 2
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension1[1], maxim):
                new_matrix1[i].append(0)
        else:
            new_matrix1.append([0] * maxim)
        if i < dimension2[0]:
            for _ in range(dimension2[1], maxim):
                new_matrix2[i].append(0)
        else:
            new_matrix2.append([0] * maxim)

    final_matrix = actual_strassen(new_matrix1, new_matrix2)

    # Removing the additional zeros
    for i in range(0, maxim):
        if i < dimension1[0]:
            for _ in range(dimension2[1], maxim):
                final_matrix[i].pop()
        else:
            final_matrix.pop()
    return final_matrix
if __name__ == "__main__":
    # Demo: a 10x4 by 4x4 product (the original bound both matrices to the
    # same reused name and then called `strassen` with an undefined one).
    matrix1 = [
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 4, 3, 1],
        [2, 3, 6, 7],
        [3, 1, 2, 4],
        [2, 3, 4, 5],
        [6, 2, 3, 1],
    ]
    matrix2 = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
    print(strassen(matrix1, matrix2))
| 703 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    """Tests that ``TRANSFORMERS_OFFLINE=1`` serves models from the local cache.

    Each test launches a fresh Python subprocess because offline mode must be
    configured before `transformers` is imported — too late inside the running
    pytest process.  The subprocess program is assembled from three segments:
    `load` (imports), `run` (the work) and `mock` (replaces `socket.socket`
    to emulate a missing network).
    """

    @require_torch
    def test_offline_mode(self):
        """Offline mode must succeed from cache when any network use raises."""
        load = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
        run = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
        mock = '''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
        # Force fetching the files so that we can use the cache
        mname = '''hf-internal-testing/tiny-random-bert'''
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='''fill-mask''', model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '''-c''', '''\n'''.join([load, run, mock])]
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        # (the previous version assigned "1" to a throwaway local instead of
        # setting the environment variable)
        env['''TRANSFORMERS_OFFLINE'''] = '''1'''
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('''success''', result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        """With warm cache, loading must succeed even if sockets raise socket.error."""
        load = '''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
        run = '''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
        mock = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
        # Force fetching the files so that we can use the cache
        mname = '''hf-internal-testing/tiny-random-bert'''
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='''fill-mask''', model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '''-c''', '''\n'''.join([load, run, mock])]
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('''success''', result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        """Sharded checkpoints must also load from cache in offline mode."""
        load = '''
from transformers import BertConfig, BertModel, BertTokenizer
'''
        run = '''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
        mock = '''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '''-c''', '''\n'''.join([load, run])]
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('''success''', result.stdout.decode())
        # next emulate no network
        cmd = [sys.executable, '''-c''', '''\n'''.join([load, mock, run])]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['''TRANSFORMERS_OFFLINE'''] = '''1'''
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('''success''', result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        """`pipeline` without an explicit task must fail clearly in offline mode."""
        load = '''
from transformers import pipeline
'''
        run = '''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
        mock = '''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
        env = self.get_env()
        env['''TRANSFORMERS_OFFLINE'''] = '''1'''
        cmd = [sys.executable, '''-c''', '''\n'''.join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            '''You cannot infer task automatically within `pipeline` when using offline mode''',
            result.stderr.decode().replace('''\n''', ''),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        """Models with remote code must load from cache in offline mode."""
        load = '''
from transformers import AutoModel
'''
        run = '''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '''-c''', '''\n'''.join([load, run])]
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('''success''', result.stdout.decode())
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['''TRANSFORMERS_OFFLINE'''] = '''1'''
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('''success''', result.stdout.decode())
| 8 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
# Base search URL; the location is appended by `fetch_jobs` (the previous
# version bound this to a throwaway name and referenced the undefined `url`).
url = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='


def fetch_jobs(location: str = "mumbai") -> Generator[tuple[str, str], None, None]:
    """Yield ``(job_title, company_name)`` pairs scraped from Indeed for *location*."""
    soup = BeautifulSoup(requests.get(url + location).content, '''html.parser''')
    # This attribute finds out all the specifics listed in a job
    for job in soup.find_all('''div''', attrs={'''data-tn-component''': '''organicJob'''}):
        job_title = job.find('''a''', attrs={'''data-tn-element''': '''jobTitle'''}).text.strip()
        company_name = job.find('''span''', {'''class''': '''company'''}).text.strip()
        yield job_title, company_name


if __name__ == "__main__":
    for i, job in enumerate(fetch_jobs('Bangalore'), 1):
        print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 704 |
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'Alexander Joslin'
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized infix expression with Dijkstra's
    two-stack algorithm.

    Single-digit operands only; operators: * / + -.  (The previous version's
    body referenced the undefined names `equation`, `i`, `opr`, `num1`,
    `num2` because all locals were collapsed into one reused name.)
    """
    operators = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: push operands
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: on ')', apply the top operator to the top two operands
            opr = operator_stack.peek()
            operator_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num1, num2)
            operand_stack.push(total)
    # RULE 5: the remaining operand is the result
    return operand_stack.peek()
if __name__ == "__main__":
    # Demo (the previous version bound the expression to a throwaway name and
    # then referenced the undefined `equation`).
    equation = '(5 + ((4 * 2) * (2 + 3)))'
    # answer = 45
    print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
| 8 | 0 |
'''simple docstring'''
from __future__ import annotations
from functools import lru_cache
from math import ceil
SCREAMING_SNAKE_CASE = 100
SCREAMING_SNAKE_CASE = set(range(3, NUM_PRIMES, 2))
primes.add(2)
SCREAMING_SNAKE_CASE = 42
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=1_0_0 )
def lowercase_ ( __A : int ) -> set[int]:
"""simple docstring"""
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
lowercase : set[int] =set()
lowercase : int
lowercase : int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def lowercase_ ( __A : int = 5_0_0_0 ) -> int | None:
"""simple docstring"""
for number_to_partition in range(1 , __A ):
if len(partition(__A ) ) > number_unique_partitions:
return number_to_partition
return None
if __name__ == "__main__":
print(f"""{solution() = }""")
| 705 |
'''simple docstring'''
import re
def lowercase_ ( __A : str ) -> bool:
"""simple docstring"""
lowercase : Any =re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
if match := re.search(__A , __A ):
return match.string == phone
return False
if __name__ == "__main__":
print(indian_phone_validator('+918827897895'))
| 8 | 0 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import PegasusConfig, PegasusForConditionalGeneration, PegasusTokenizer
from transformers.models.pegasus.configuration_pegasus import DEFAULTS, task_specific_params
# (pegasus_substring, hf_substring) rewrite table consumed by
# `rename_state_dict_key`; it must be named PATTERNS because that function
# references it (the previous version bound it to a throwaway name).
PATTERNS = [
    # replace left string with right string to get the relevant state_dict key (identical state dict to bart)
    ['memory_attention', 'encoder_attn'],
    ['attention', 'attn'],
    ['/', '.'],
    ['.LayerNorm.gamma', '_layer_norm.weight'],
    ['.LayerNorm.beta', '_layer_norm.bias'],
    ['r.layer_', 'r.layers.'],
    ['output_proj', 'out_proj'],
    ['ffn.dense_1.', 'fc2.'],
    ['ffn.dense.', 'fc1.'],
    ['ffn_layer_norm', 'final_layer_norm'],
    ['kernel', 'weight'],
    ['encoder_layer_norm.', 'encoder.layer_norm.'],
    ['decoder_layer_norm.', 'decoder.layer_norm.'],
    ['embeddings.weights', 'shared.weight'],
]
def rename_state_dict_key(k: str, patterns: list = None) -> str:
    """Translate a TF Pegasus variable name into its HF (Bart-style) key.

    Applies each (old, new) substring replacement in order.  *patterns*
    defaults to the module-level PATTERNS table and can be supplied explicitly
    for testing.  (The previous version discarded the result of `str.replace`
    and referenced undefined names.)
    """
    if patterns is None:
        patterns = PATTERNS
    for pegasus_name, hf_name in patterns:
        k = k.replace(pegasus_name, hf_name)
    return k
def convert_pegasus(tf_weights: dict, cfg_updates: dict) -> PegasusForConditionalGeneration:
    """Build a PegasusForConditionalGeneration and load the converted TF
    weights into it.

    *tf_weights* maps TF variable names to numpy arrays; *cfg_updates*
    overrides the DEFAULTS config.  (The previous version repeated one
    parameter name — a SyntaxError — and discarded every intermediate
    result into a single reused local.)
    """
    cfg_kwargs = DEFAULTS.copy()
    cfg_kwargs.update(cfg_updates)
    cfg = PegasusConfig(**cfg_kwargs)
    torch_model = PegasusForConditionalGeneration(cfg)
    sd = torch_model.model.state_dict()
    mapping = {}
    for k, v in tf_weights.items():
        new_k = rename_state_dict_key(k)
        if new_k not in sd:
            raise ValueError(F'could not find new key {new_k} in state dict. (converted from {k})')

        if "dense" in k or "proj" in new_k:
            # TF stores linear layers transposed relative to torch.
            v = v.T
        mapping[new_k] = torch.tensor(v, dtype=sd[new_k].dtype)
        assert v.shape == sd[new_k].shape, F'{new_k}, {k}, {v.shape}, {sd[new_k].shape}'
    # make sure embedding.padding_idx is respected
    mapping['''shared.weight'''][cfg.pad_token_id] = torch.zeros_like(mapping['''shared.weight'''][cfg.pad_token_id + 1])
    mapping['''encoder.embed_tokens.weight'''] = mapping['''shared.weight''']
    mapping['''decoder.embed_tokens.weight'''] = mapping['''shared.weight''']
    empty_biases = {k: torch.zeros_like(v) for k, v in sd.items() if k.endswith('''bias''') and k not in mapping}
    mapping.update(**empty_biases)
    missing, extra = torch_model.model.load_state_dict(mapping, strict=False)
    unexpected_missing = [
        k for k in missing if k not in ['''encoder.embed_positions.weight''', '''decoder.embed_positions.weight''']
    ]
    assert unexpected_missing == [], F'no matches found for the following torch keys {unexpected_missing}'
    assert extra == [], F'no matches found for the following tf keys {extra}'
    return torch_model
def get_tf_weights_as_numpy(path="./ckpt/aeslc/model.ckpt-32000") -> Dict:
    """Load all variables from a TF checkpoint into a name -> numpy dict,
    skipping optimizer state (`Adafactor`) and `global_step`.

    (The previous version never populated the dict and returned the undefined
    name `tf_weights`.)
    """
    init_vars = tf.train.list_variables(path)
    tf_weights = {}
    ignore_name = ['''Adafactor''', '''global_step''']
    for name, shape in tqdm(init_vars, desc='''converting tf checkpoint to dict'''):
        skip_key = any(pat in name for pat in ignore_name)
        if skip_key:
            continue
        array = tf.train.load_variable(path, name)
        tf_weights[name] = array
    return tf_weights
def convert_pegasus_ckpt_to_pytorch(ckpt_path: str, save_dir: str) -> None:
    """Convert one TF Pegasus checkpoint (plus tokenizer) to a PyTorch
    checkpoint under *save_dir*.

    The dataset name is taken from the checkpoint's parent directory and used
    to select task-specific config overrides.  (The previous version repeated
    one parameter name — a SyntaxError — and discarded intermediates into a
    single reused local.)
    """
    dataset = Path(ckpt_path).parent.name
    desired_max_model_length = task_specific_params[F'summarization_{dataset}']['''max_position_embeddings''']
    tok = PegasusTokenizer.from_pretrained('''sshleifer/pegasus''', model_max_length=desired_max_model_length)
    assert tok.model_max_length == desired_max_model_length
    tok.save_pretrained(save_dir)

    # convert model
    tf_weights = get_tf_weights_as_numpy(ckpt_path)
    cfg_updates = task_specific_params[F'summarization_{dataset}']
    if dataset == "large":
        cfg_updates['''task_specific_params'''] = task_specific_params
    torch_model = convert_pegasus(tf_weights, cfg_updates)
    torch_model.save_pretrained(save_dir)
    sd = torch_model.state_dict()
    # Positional embeddings are deterministic sinusoids; no need to save them.
    sd.pop('''model.decoder.embed_positions.weight''')
    sd.pop('''model.encoder.embed_positions.weight''')
    torch.save(sd, Path(save_dir) / '''pytorch_model.bin''')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('tf_ckpt_path', type=str, help='passed to tf.train.list_variables')
    parser.add_argument('save_dir', default=None, type=str, help='Path to the output PyTorch model.')
    args = parser.parse_args()
    if args.save_dir is None:
        # Default output dir: pegasus/<dataset name taken from the checkpoint
        # path> (the previous version referenced the undefined `dataset` and
        # never stored the result back into args).
        dataset = Path(args.tf_ckpt_path).parent.name
        args.save_dir = os.path.join('pegasus', dataset)
    convert_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir)
| 706 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any]=13 , UpperCAmelCase : int=7 , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=99 , UpperCAmelCase : List[Any]=32 , UpperCAmelCase : str=2 , UpperCAmelCase : str=4 , UpperCAmelCase : List[Any]=37 , UpperCAmelCase : str="gelu" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Dict=512 , UpperCAmelCase : List[Any]=16 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : Any=0.0_2 , UpperCAmelCase : List[str]=3 , UpperCAmelCase : Dict=4 , UpperCAmelCase : Optional[int]=None , ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =parent
lowercase : Tuple =13
lowercase : Any =7
lowercase : Union[str, Any] =True
lowercase : Any =True
lowercase : Optional[int] =True
lowercase : List[str] =True
lowercase : Tuple =99
lowercase : str =32
lowercase : Union[str, Any] =2
lowercase : Dict =4
lowercase : Union[str, Any] =37
lowercase : Union[str, Any] ='''gelu'''
lowercase : Any =0.1
lowercase : Dict =0.1
lowercase : Dict =512
lowercase : List[str] =16
lowercase : Dict =2
lowercase : int =0.0_2
lowercase : List[Any] =3
lowercase : List[str] =4
lowercase : Optional[Any] =None
def A__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
lowercase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : str =None
if self.use_input_mask:
lowercase : int =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Any =None
if self.use_token_type_ids:
lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : List[Any] =None
lowercase : List[str] =None
lowercase : List[str] =None
if self.use_labels:
lowercase : Optional[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Any =ids_tensor([self.batch_size] , self.num_choices )
lowercase : List[Any] =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerModel(config=UpperCAmelCase )
lowercase : Optional[int] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase : Tuple =[input_ids, input_mask]
lowercase : str =model(UpperCAmelCase )
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ) -> Any:
'''simple docstring'''
lowercase : Dict =True
lowercase : List[Any] =TFRoFormerForCausalLM(config=UpperCAmelCase )
lowercase : Union[str, Any] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[Any] =model(UpperCAmelCase )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def A__ ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple ) -> Dict:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerForMaskedLM(config=UpperCAmelCase )
lowercase : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : int ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =self.num_labels
lowercase : Optional[int] =TFRoFormerForSequenceClassification(config=UpperCAmelCase )
lowercase : Optional[int] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =self.num_choices
lowercase : Tuple =TFRoFormerForMultipleChoice(config=UpperCAmelCase )
lowercase : Union[str, Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : Tuple =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
lowercase : List[Any] =self.num_labels
lowercase : Union[str, Any] =TFRoFormerForTokenClassification(config=UpperCAmelCase )
lowercase : Tuple ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : int , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ) -> Any:
'''simple docstring'''
lowercase : Tuple =TFRoFormerForQuestionAnswering(config=UpperCAmelCase )
lowercase : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
lowercase : Optional[Any] =self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) : Optional[int] =config_and_inputs
lowercase : str ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model/pipeline test wiring for TF RoFormer.

    NOTE(review): the mixin bases and ``RoFormerConfig`` are assumed to be
    imported at the top of this file (imports are outside this chunk) — TODO
    confirm. The original declared a duplicated base class (TypeError) and
    test methods named ``A__`` that unittest never discovers.
    """

    # Model classes exercised by the common tests (mixin contract name).
    all_model_classes = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    # Mapping used by the pipeline test mixin.
    pipeline_model_mapping = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        # Text-generation pipeline tests are skipped for this model.
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False

    def setUp(self):
        self.model_tester = TFRoFormerModelTester(self)
        # NOTE(review): assumes RoFormerConfig is imported at module level.
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
    """Integration test against the real pretrained RoFormer checkpoint."""

    @slow
    def test_inference_masked_lm(self):
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000

        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    """Unit tests for the sinusoidal positional-embedding layer."""

    # Referenced as ``self.tolerance`` by the assertions below; the original
    # only defined an unrelated obfuscated attribute (AttributeError).
    tolerance = 1e-4

    def test_basic(self):
        input_ids = tf.constant([[4, 10]])
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb = emb1(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )
        tf.debugging.assert_near(emb, desired_weights, atol=self.tolerance)

    def test_positional_emb_weights_against_roformer(self):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emb1 = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emb1([2, 16, 512])  # build the layer so its weight exists
        weights = emb1.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    """Checks apply_rotary_position_embeddings against precomputed values."""

    tolerance = 1e-4  # referenced as ``self.tolerance`` below

    def test_apply_rotary_position_embeddings(self):
        # 2x12x16x64 ramp tensors, scaled down; key is the negated query.
        # The original used the nonexistent dtype ``tf.floataa`` and undefined
        # ``query_layer``/``key_layer`` names.
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_positions = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_positions, query_layer, key_layer
        )

        desired_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        desired_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
| 8 | 0 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import YolosImageProcessor
class YolosImageProcessingTester(unittest.TestCase):
    """Holds hyper-parameters for YolosImageProcessor tests and computes the
    output sizes the processor is expected to produce.

    Renamed from the obfuscated class name to match the instantiation in the
    test class below; the original ``__init__`` repeated one parameter name
    (a SyntaxError) and ``get_expected_values`` read undefined locals.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # NOTE: the list defaults above are only read, never mutated.
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        """Kwargs for constructing the YolosImageProcessor under test."""
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Return the (height, width) the processor should produce.

        Mirrors the shortest-edge resize rule; for a batch, the maximum height
        and width over the individual images is returned (padding behaviour).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                # assumes channels-first arrays/tensors: shape (C, H, W)
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class YolosImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Test-suite for YolosImageProcessor: PIL/NumPy/PyTorch inputs, padding
    equivalence, and COCO detection/panoptic annotation encoding.

    The base mixin comes from the visible import of
    ``ImageProcessingSavingTestMixin``; the original class had an unresolved
    base, undefined locals in every test, and methods unittest never discovers.
    """

    image_processing_class = YolosImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = YolosImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(
            image_inputs, batched=True
        )
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_equivalence_padding(self):
        # Calling the processor directly must pad exactly like calling pad().
        image_processing_1 = self.image_processing_class(**self.image_processor_dict)
        image_processing_2 = self.image_processing_class(do_resize=False, do_normalize=False, do_rescale=False)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test whether the method "pad" and calling the image processor return the same tensors
        encoded_images_with_method = image_processing_1.pad(image_inputs, return_tensors="pt")
        encoded_images = image_processing_2(image_inputs, return_tensors="pt")

        self.assertTrue(
            torch.allclose(
                encoded_images_with_method["pixel_values"], encoded_images["pixel_values"], atol=1e-4
            )
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = YolosImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
| 707 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    """Processor combining a LayoutLMv2 image processor (optionally running OCR)
    with a LayoutXLM tokenizer into a single callable.

    The base class is the visibly-imported ``ProcessorMixin``; the original had
    duplicated parameter names (SyntaxError) and undefined locals throughout.
    """

    # ProcessorMixin contract attributes.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        # Accept the deprecated kwarg as a fallback for the new name.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Run the image processor (OCR if configured), then the tokenizer, and
        merge pixel values into the tokenizer output under the ``image`` key.
        """
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        """Duplicate images so every overflowing token window keeps its image."""
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 8 | 0 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    """A single element of the singly linked list."""

    data: int
    next_node: "Node | None"


class SortedLinkedList:
    """Singly linked list that stores its integers in ascending order."""

    def __init__(self, ints: Iterable[int]) -> None:
        self.head: "Node | None" = None
        # Insert in descending order at the head so the list ends up ascending.
        # (The original never updated self.head, leaving the list empty.)
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: "SortedLinkedList", sll_two: "SortedLinkedList") -> "SortedLinkedList":
    """Return a new sorted linked list with all elements of both inputs."""
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 708 |
'''simple docstring'''
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of ``n`` (Project Euler problem 3).

    Trial division: strip each factor i completely while i*i <= n; whatever
    remains above 1 is itself the largest prime factor. The original returned
    the untouched input and used undefined loop variables. Renamed to
    ``solution`` to match the call in the ``__main__`` guard.

    Raises:
        TypeError: if ``n`` cannot be converted to int.
        ValueError: if ``n`` is not positive.
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    largest_factor = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            largest_factor = i
            n //= i
        i += 1
    if n > 1:
        # Remaining n is prime and larger than every factor stripped so far.
        largest_factor = n
    return int(largest_factor)


if __name__ == "__main__":
    print(f"{solution() = }")
| 8 | 0 |
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
    # Binary image classifier (CNN) demo script.
    # NOTE(review): the model is bound to ``SCREAMING_SNAKE_CASE`` but every
    # subsequent call uses the undefined name ``classifier`` — presumably the
    # renaming pass destroyed the original variable; the script cannot run as
    # written. Likewise ``layers.ConvaD``/``layers.MaxPoolingaD`` look like
    # mangled ``Conv2D``/``MaxPooling2D``, and ``result`` below is undefined.
    # Initialising the CNN
    # (Sequential- Building the model layer by layer)
    SCREAMING_SNAKE_CASE = models.Sequential()
    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation='relu')
    )
    # Step 2 - Pooling
    classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
    # Adding a second convolutional layer
    classifier.add(layers.ConvaD(32, (3, 3), activation='relu'))
    classifier.add(layers.MaxPoolingaD(pool_size=(2, 2)))
    # Step 3 - Flattening
    classifier.add(layers.Flatten())
    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation='relu'))
    classifier.add(layers.Dense(units=1, activation='sigmoid'))
    # Compiling the CNN
    classifier.compile(
        optimizer='adam', loss='binary_crossentropy', metrics=['accuracy']
    )
    # Part 2 - Fitting the CNN to the images
    # Load Trained model weights
    # from keras.models import load_model
    # regressor=load_model('cnn.h5')
    # Augmented training data: rescale to [0, 1], random shear/zoom/flip.
    SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )
    # Test data only gets the rescale — no augmentation at evaluation time.
    SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
    SCREAMING_SNAKE_CASE = train_datagen.flow_from_directory(
        'dataset/training_set', target_size=(64, 64), batch_size=32, class_mode='binary'
    )
    SCREAMING_SNAKE_CASE = test_datagen.flow_from_directory(
        'dataset/test_set', target_size=(64, 64), batch_size=32, class_mode='binary'
    )
    classifier.fit_generator(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )
    classifier.save('cnn.h5')
    # Part 3 - Making new predictions
    SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.load_img(
        'dataset/single_prediction/image.png', target_size=(64, 64)
    )
    SCREAMING_SNAKE_CASE = tf.keras.preprocessing.image.img_to_array(test_image)
    # Add a batch dimension: the model expects (batch, 64, 64, 3).
    SCREAMING_SNAKE_CASE = np.expand_dims(test_image, axis=0)
    SCREAMING_SNAKE_CASE = classifier.predict(test_image)
    # training_set.class_indices
    if result[0][0] == 0:
        SCREAMING_SNAKE_CASE = 'Normal'
    if result[0][0] == 1:
        SCREAMING_SNAKE_CASE = 'Abnormality detected'
| 709 |
'''simple docstring'''
from __future__ import annotations
import math
def lowercase_(__A: float, __B: int) -> float:
    """Return ``u * (u-1) * ... * (u-(p-1))`` for Newton's forward-difference
    interpolation, where ``u = __A`` and ``p = __B``.

    The obfuscated original declared both parameters as ``__A`` (a
    SyntaxError); the term count is renamed ``__B``.
    """
    temp = __A
    for i in range(1, __B):
        temp = temp * (__A - i)
    return temp
def lowercase_( ) -> None:
    """Interactive driver for Newton's forward-difference interpolation.

    Reads a table of (x, y) samples from stdin, builds the forward-difference
    table, and prints the interpolated value at a user-supplied point.

    NOTE(review): the renaming pass collapsed every local onto ``lowercase``
    and left ``__A``/``n``/``x``/``value``/``summ``/``ucal`` undefined inside
    this body — as written, each loop bound and accumulator raises NameError.
    The structure below documents the *intended* algorithm; it must be
    restored against the original before this can run.
    """
    # n: number of sample points (intended).
    lowercase : List[str] =int(input('''enter the numbers of values: ''' ) )
    # y: n x n forward-difference table, column 0 holds the sample values.
    lowercase : list[list[float]] =[]
    for _ in range(__A ):
        y.append([] )
    for i in range(__A ):
        for j in range(__A ):
            y[i].append(__A )
    lowercase : List[Any] =0
    print('''enter the values of parameters in a list: ''' )
    # x: the sample abscissae (intended).
    lowercase : Optional[int] =list(map(__A , input().split() ) )
    print('''enter the values of corresponding parameters: ''' )
    for i in range(__A ):
        lowercase : str =float(input() )
    lowercase : int =int(input('''enter the value to interpolate: ''' ) )
    # u = (value - x0) / h, assuming equally spaced abscissae.
    lowercase : Union[str, Any] =(value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1 , __A ):
        for j in range(n - i ):
            lowercase : str =y[j + 1][i - 1] - y[j][i - 1]
    # Newton's forward formula: sum of u-terms times top-row differences.
    lowercase : Any =y[0][0]
    for i in range(1 , __A ):
        summ += (ucal(__A , __A ) * y[0][i]) / math.factorial(__A )
    print(F'the value at {value} is {summ}' )
if __name__ == "__main__":
    # The obfuscated original called the undefined name ``main``; the
    # interactive driver above is bound to ``lowercase_``.
    lowercase_()
| 8 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
# Force deterministic torch ops so the pixel-slice checksums below are stable.
enable_full_determinism()
class UpperCAmelCase_ ( __A , __A , __A , unittest.TestCase ):
    """Fast (CPU-sized) tests for ``StableUnCLIPImgaImgPipeline``.

    NOTE(review): the three base classes were obfuscated to ``__A`` —
    presumably the mixins imported above (PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin, PipelineTesterMixin); confirm against the
    original file.
    """
    UpperCamelCase_ = StableUnCLIPImgaImgPipeline
    UpperCamelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
    UpperCamelCase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    UpperCamelCase_ = frozenset(
    [] )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    UpperCamelCase_ = frozenset([] )
    def A__ ( self : Any ) -> List[Any]:
        """Build a dict of tiny randomly-initialized pipeline components.

        NOTE(review): ``embedder_hidden_size``/``embedder_projection_dim`` are
        read but never defined — the ``32`` bound to the throwaway local above
        was presumably meant for them; restore before running.
        """
        lowercase : List[Any] =32
        lowercase : Optional[Any] =embedder_hidden_size
        # image encoding components
        lowercase : List[str] =CLIPImageProcessor(crop_size=32 , size=32 )
        torch.manual_seed(0 )
        lowercase : str =CLIPVisionModelWithProjection(
            CLIPVisionConfig(
                hidden_size=UpperCAmelCase , projection_dim=UpperCAmelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
        # regular denoising components
        torch.manual_seed(0 )
        lowercase : int =StableUnCLIPImageNormalizer(embedding_dim=UpperCAmelCase )
        lowercase : Union[str, Any] =DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' )
        torch.manual_seed(0 )
        lowercase : str =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
        torch.manual_seed(0 )
        lowercase : Tuple =CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=UpperCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
        torch.manual_seed(0 )
        lowercase : int =UNetaDConditionModel(
            sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type='''projection''' , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=UpperCAmelCase , layers_per_block=1 , upcast_attention=UpperCAmelCase , use_linear_projection=UpperCAmelCase , )
        torch.manual_seed(0 )
        lowercase : int =DDIMScheduler(
            beta_schedule='''scaled_linear''' , beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , prediction_type='''v_prediction''' , set_alpha_to_one=UpperCAmelCase , steps_offset=1 , )
        torch.manual_seed(0 )
        lowercase : str =AutoencoderKL()
        lowercase : int ={
            # image encoding components
            '''feature_extractor''': feature_extractor,
            '''image_encoder''': image_encoder.eval(),
            # image noising components
            '''image_normalizer''': image_normalizer.eval(),
            '''image_noising_scheduler''': image_noising_scheduler,
            # regular denoising components
            '''tokenizer''': tokenizer,
            '''text_encoder''': text_encoder.eval(),
            '''unet''': unet.eval(),
            '''scheduler''': scheduler,
            '''vae''': vae.eval(),
        }
        return components
    def A__ ( self : Any , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any]=0 , UpperCAmelCase : int=True ) -> Optional[Any]:
        """Build deterministic call kwargs (prompt, image, generator) for one run."""
        if str(UpperCAmelCase ).startswith('''mps''' ):
            lowercase : List[str] =torch.manual_seed(UpperCAmelCase )
        else:
            lowercase : Any =torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
        lowercase : Any =floats_tensor((1, 3, 32, 32) , rng=random.Random(UpperCAmelCase ) ).to(UpperCAmelCase )
        if pil_image:
            # Map [-1, 1] to [0, 1], then convert NCHW tensor -> PIL image.
            lowercase : str =input_image * 0.5 + 0.5
            lowercase : Tuple =input_image.clamp(0 , 1 )
            lowercase : Tuple =input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
            lowercase : Dict =DiffusionPipeline.numpy_to_pil(UpperCAmelCase )[0]
        return {
            "prompt": "An anime racoon running a marathon",
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "np",
        }
    @skip_mps
    def A__ ( self : Dict ) -> Dict:
        """End-to-end CPU smoke test: run the pipeline and pin a pixel slice."""
        lowercase : Union[str, Any] ='''cpu''' # ensure determinism for the device-dependent torch.Generator
        lowercase : Optional[int] =self.get_dummy_components()
        lowercase : List[Any] =StableUnCLIPImgaImgPipeline(**UpperCAmelCase )
        lowercase : str =sd_pipe.to(UpperCAmelCase )
        sd_pipe.set_progress_bar_config(disable=UpperCAmelCase )
        lowercase : Optional[Any] =self.get_dummy_inputs(UpperCAmelCase )
        inputs.update({'''image_embeds''': None} )
        lowercase : int =sd_pipe(**UpperCAmelCase ).images
        lowercase : str =image[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        lowercase : str =np.array([0.3_8_7_2, 0.7_2_2_4, 0.5_6_0_1, 0.4_7_4_1, 0.6_8_7_2, 0.5_8_1_4, 0.4_6_3_6, 0.3_8_6_7, 0.5_0_7_8] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
    def A__ ( self : Dict ) -> Tuple:
        """Attention slicing must not change outputs (strict only on cpu/mps)."""
        lowercase : Union[str, Any] =torch_device in ['''cpu''', '''mps''']
        self._test_attention_slicing_forward_pass(test_max_difference=UpperCAmelCase )
    def A__ ( self : List[Any] ) -> Optional[int]:
        """Batched vs single inference must match (strict only on cpu/mps)."""
        lowercase : int =torch_device in ['''cpu''', '''mps''']
        self._test_inference_batch_single_identical(test_max_difference=UpperCAmelCase )
    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
    def A__ ( self : Optional[Any] ) -> Union[str, Any]:
        """xFormers attention must reproduce the default outputs (CUDA only)."""
        self._test_xformers_attention_forwardGenerator_pass(test_max_difference=UpperCAmelCase )
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
    """Slow GPU integration tests against published stable-unclip img2img checkpoints."""
    def A__ ( self : Optional[int] ) -> List[Any]:
        """Free GPU memory between tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def A__ ( self : List[Any] ) -> Union[str, Any]:
        """Compare the 2-1-l img2img checkpoint output against a stored reference."""
        lowercase : Any =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        lowercase : Dict =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy''' )
        lowercase : Union[str, Any] =StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-l-img2img''' , torch_dtype=torch.floataa )
        pipe.to(UpperCAmelCase )
        pipe.set_progress_bar_config(disable=UpperCAmelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowercase : Tuple =torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowercase : Dict =pipe(UpperCAmelCase , '''anime turle''' , generator=UpperCAmelCase , output_type='''np''' )
        lowercase : int =output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase )
    def A__ ( self : Optional[Any] ) -> Optional[int]:
        """Compare the 2-1-h img2img checkpoint output against a stored reference."""
        lowercase : Optional[Any] =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        lowercase : Optional[int] =load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy''' )
        lowercase : Tuple =StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa )
        pipe.to(UpperCAmelCase )
        pipe.set_progress_bar_config(disable=UpperCAmelCase )
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowercase : Any =torch.Generator(device='''cpu''' ).manual_seed(0 )
        lowercase : Optional[Any] =pipe(UpperCAmelCase , '''anime turle''' , generator=UpperCAmelCase , output_type='''np''' )
        lowercase : str =output.images[0]
        assert image.shape == (768, 768, 3)
        assert_mean_pixel_difference(UpperCAmelCase , UpperCAmelCase )
    def A__ ( self : str ) -> Tuple:
        """Run with memory savings enabled and assert peak GPU allocation stays < 7 GB."""
        lowercase : Union[str, Any] =load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png''' )
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        lowercase : Any =StableUnCLIPImgaImgPipeline.from_pretrained(
            '''fusing/stable-unclip-2-1-h-img2img''' , torch_dtype=torch.floataa )
        lowercase : List[Any] =pipe.to(UpperCAmelCase )
        pipe.set_progress_bar_config(disable=UpperCAmelCase )
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()
        lowercase : List[str] =pipe(
            UpperCAmelCase , '''anime turtle''' , num_inference_steps=2 , output_type='''np''' , )
        lowercase : Dict =torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 710 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
    """Tests for ``AutoImageProcessor``: hub/local loading, error messages,
    custom (remote-code) processors, and the register() extension API.

    NOTE(review): many assertions were obfuscated into
    ``assertIsInstance(UpperCAmelCase, UpperCAmelCase)`` — the intended
    expected classes (e.g. CLIPImageProcessor) must be restored from the
    original before these tests are meaningful.
    """
    def A__ ( self : int ) -> Any:
        """Per-test setup (counter reset in the original)."""
        lowercase : Union[str, Any] =0
    def A__ ( self : str ) -> Union[str, Any]:
        """Load a processor for a hub model id."""
        lowercase : Tuple =AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
        self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
    def A__ ( self : Any ) -> int:
        """Load from a local dir whose preprocessor_config.json names the class."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
            lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
            lowercase : str =AutoImageProcessor.from_pretrained(UpperCAmelCase )
            self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
    def A__ ( self : Any ) -> Union[str, Any]:
        """Legacy key: feature_extractor_type should still resolve an image processor."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
            lowercase : int =Path(UpperCAmelCase ) / '''config.json'''
            json.dump(
                {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
            lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
            self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
    def A__ ( self : Optional[Any] ) -> Any:
        """config.json alone (model_type) should suffice to pick the processor class."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase : str =CLIPConfig()
            # Create a dummy config file with image_proceesor_type
            lowercase : Optional[int] =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
            lowercase : Optional[Any] =Path(UpperCAmelCase ) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase ).to_dict()
            config_dict.pop('''image_processor_type''' )
            lowercase : str =CLIPImageProcessor(**UpperCAmelCase )
            # save in new folder
            model_config.save_pretrained(UpperCAmelCase )
            config.save_pretrained(UpperCAmelCase )
            lowercase : Optional[int] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
            # make sure private variable is not incorrectly saved
            lowercase : int =json.loads(config.to_json_string() )
            self.assertTrue('''_processor_class''' not in dict_as_saved )
            self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
    def A__ ( self : str ) -> List[str]:
        """Load from a dir that only has preprocessor_config.json (no config.json)."""
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase : Dict =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
            lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
            self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
    def A__ ( self : int ) -> List[str]:
        """A bogus repo id should raise with a helpful message."""
        with self.assertRaisesRegex(
            UpperCAmelCase , '''clip-base is not a local folder and is not a valid model identifier''' ):
            lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained('''clip-base''' )
    def A__ ( self : List[Any] ) -> List[str]:
        """A bogus revision should raise with a helpful message."""
        with self.assertRaisesRegex(
            UpperCAmelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , revision='''aaaaaa''' )
    def A__ ( self : Tuple ) -> Optional[int]:
        """A repo without preprocessor_config.json should raise with a helpful message."""
        with self.assertRaisesRegex(
            UpperCAmelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
            lowercase : Optional[int] =AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
    def A__ ( self : List[str] ) -> str:
        """Remote-code processors require trust_remote_code=True and survive a save/reload."""
        with self.assertRaises(UpperCAmelCase ):
            lowercase : Dict =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(UpperCAmelCase ):
            lowercase : List[str] =AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
        lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained(
            '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
        self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(UpperCAmelCase )
            lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , trust_remote_code=UpperCAmelCase )
        self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
    def A__ ( self : Union[str, Any] ) -> Dict:
        """register() should add a custom processor and reject duplicates; cleanup in finally."""
        try:
            AutoConfig.register('''custom''' , UpperCAmelCase )
            AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(UpperCAmelCase ):
                AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
            with tempfile.TemporaryDirectory() as tmpdirname:
                lowercase : Any =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
                lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
                json.dump(
                    {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
                json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
                lowercase : Optional[int] =CustomImageProcessor.from_pretrained(UpperCAmelCase )
                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(UpperCAmelCase )
                    lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase )
                    self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def A__ ( self : Any ) -> Any:
        """A locally registered class should win over hub remote code unless trust_remote_code=True."""
        class UpperCAmelCase_ ( __A ):
            """Local stand-in processor; ``is_local`` marks it as the local variant."""
            UpperCamelCase_ = True
        try:
            AutoConfig.register('''custom''' , UpperCAmelCase )
            AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
            # If remote code is not set, the default is to use local
            lowercase : List[str] =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote code is disabled, we load the local one.
            lowercase : Tuple =AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote is enabled, we load from the Hub
            lowercase : Dict =AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(not hasattr(UpperCAmelCase , '''is_local''' ) )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 8 | 0 |
'''simple docstring'''
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
# NOTE(review): three feature-availability flags were collapsed onto one name
# by the renaming pass — only the last ``False`` binding survives.
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = False, False, False
@dataclass
class UpperCAmelCase_ :
    """The ``Audio`` dataset feature: encodes audio as ``{"bytes", "path"}``
    structs in Arrow and decodes them to ``{"path", "array", "sampling_rate"}``.

    NOTE(review): attribute names were obfuscated — the fields presumably are
    sampling_rate / mono / decode / id plus dtype, pa_type and _type; confirm
    against the original ``datasets`` source.
    """
    UpperCamelCase_ = None
    UpperCamelCase_ = True
    UpperCamelCase_ = True
    UpperCamelCase_ = None
    # Automatically constructed
    UpperCamelCase_ = '''dict'''
    UpperCamelCase_ = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} )
    UpperCamelCase_ = field(default='''Audio''' , init=__A , repr=__A )
    def __call__( self : Optional[Any] ) -> Optional[Any]:
        """Return the Arrow storage type for this feature."""
        return self.pa_type
    def A__ ( self : Any , UpperCAmelCase : Union[str, bytes, dict] ) -> dict:
        """Encode a path, raw bytes, or an {"array", "sampling_rate"} dict
        into the ``{"bytes", "path"}`` storage struct.

        Raises:
            ImportError: if soundfile is unavailable.
            KeyError: for PCM input without a sampling rate.
            ValueError: if neither path nor bytes can be produced.
        """
        try:
            import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError('''To support encoding audio data, please install \'soundfile\'.''' ) from err
        if isinstance(UpperCAmelCase , UpperCAmelCase ):
            return {"bytes": None, "path": value}
        elif isinstance(UpperCAmelCase , UpperCAmelCase ):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            lowercase : List[Any] =BytesIO()
            sf.write(UpperCAmelCase , value['''array'''] , value['''sampling_rate'''] , format='''wav''' )
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith('''pcm''' ):
                # "PCM" only has raw audio bytes
                if value.get('''sampling_rate''' ) is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''' )
                if value.get('''bytes''' ):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    lowercase : Union[str, Any] =np.frombuffer(value['''bytes'''] , dtype=np.intaa ).astype(np.floataa ) / 3_2767
                else:
                    lowercase : List[Any] =np.memmap(value['''path'''] , dtype='''h''' , mode='''r''' ).astype(np.floataa ) / 3_2767
                lowercase : List[str] =BytesIO(bytes() )
                sf.write(UpperCAmelCase , UpperCAmelCase , value['''sampling_rate'''] , format='''wav''' )
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get('''path''' )}
        elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )}
        else:
            raise ValueError(
                f'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' )
    def A__ ( self : str , UpperCAmelCase : dict , UpperCAmelCase : Optional[Dict[str, Union[str, bool, None]]] = None ) -> dict:
        """Decode a storage struct into ``{"path", "array", "sampling_rate"}``,
        resampling / down-mixing per the feature configuration.

        Raises:
            RuntimeError: if decoding is disabled or the codec is unsupported.
            ValueError: if both path and bytes are missing.
        """
        if not self.decode:
            raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''' )
        lowercase : List[str] =(value['''path'''], BytesIO(value['''bytes'''] )) if value['''bytes'''] is not None else (value['''path'''], None)
        if path is None and file is None:
            raise ValueError(f'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' )
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''' ) from err
        lowercase : Optional[int] =xsplitext(UpperCAmelCase )[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                '''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '''
                '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                '''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '''
                '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' )
        if file is None:
            # Streaming path: resolve a per-repo auth token before opening.
            lowercase : List[Any] =token_per_repo_id or {}
            lowercase : List[Any] =path.split('''::''' )[-1]
            try:
                lowercase : Union[str, Any] =string_to_dict(UpperCAmelCase , config.HUB_DATASETS_URL )['''repo_id''']
                lowercase : List[Any] =token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                lowercase : Any =None
            with xopen(UpperCAmelCase , '''rb''' , use_auth_token=UpperCAmelCase ) as f:
                lowercase : List[str] =sf.read(UpperCAmelCase )
        else:
            lowercase : int =sf.read(UpperCAmelCase )
        lowercase : List[Any] =array.T
        if self.mono:
            lowercase : List[str] =librosa.to_mono(UpperCAmelCase )
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            lowercase : Dict =librosa.resample(UpperCAmelCase , orig_sr=UpperCAmelCase , target_sr=self.sampling_rate )
            lowercase : Any =self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def A__ ( self : str ) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the feature to raw {"bytes", "path"} Values (only when not decoding)."""
        from .features import Value
        if self.decode:
            raise ValueError('''Cannot flatten a decoded Audio feature.''' )
        return {
            "bytes": Value('''binary''' ),
            "path": Value('''string''' ),
        }
    def A__ ( self : Optional[int] , UpperCAmelCase : Union[pa.StringArray, pa.StructArray] ) -> pa.StructArray:
        """Cast string / binary / struct Arrow storage into the canonical
        ``{"bytes", "path"}`` struct array."""
        if pa.types.is_string(storage.type ):
            lowercase : Optional[int] =pa.array([None] * len(UpperCAmelCase ) , type=pa.binary() )
            lowercase : Optional[Any] =pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            lowercase : Union[str, Any] =pa.array([None] * len(UpperCAmelCase ) , type=pa.string() )
            lowercase : List[Any] =pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('''array''' ):
            lowercase : List[Any] =pa.array([Audio().encode_example(UpperCAmelCase ) if x is not None else None for x in storage.to_pylist()] )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index('''bytes''' ) >= 0:
                lowercase : List[Any] =storage.field('''bytes''' )
            else:
                lowercase : Any =pa.array([None] * len(UpperCAmelCase ) , type=pa.binary() )
            if storage.type.get_field_index('''path''' ) >= 0:
                lowercase : Any =storage.field('''path''' )
            else:
                lowercase : int =pa.array([None] * len(UpperCAmelCase ) , type=pa.string() )
            lowercase : Union[str, Any] =pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() )
        return array_cast(UpperCAmelCase , self.pa_type )
    def A__ ( self : List[Any] , UpperCAmelCase : pa.StructArray ) -> pa.StructArray:
        """Embed file contents into the storage: read each referenced path into
        bytes and keep only the basename as the path."""
        @no_op_if_value_is_null
        def path_to_bytes(UpperCAmelCase : Dict ):
            with xopen(UpperCAmelCase , '''rb''' ) as f:
                lowercase : str =f.read()
            return bytes_
        lowercase : List[str] =pa.array(
            [
                (path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else x['''bytes''']) if x is not None else None
                for x in storage.to_pylist()
            ] , type=pa.binary() , )
        lowercase : str =pa.array(
            [os.path.basename(UpperCAmelCase ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , )
        lowercase : Optional[int] =pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() )
        return array_cast(UpperCAmelCase , self.pa_type )
| 711 |
'''simple docstring'''
from __future__ import annotations
# Coulomb's constant k = 8.988e9 N * m^2 * C^-2
SCREAMING_SNAKE_CASE = 8.988E9  # units = N * m^2 * C^-2
def lowercase_ ( __A : float , __A : float , __A : float , __A : float ) -> dict[str, float]:
"""simple docstring"""
lowercase : Dict =abs(chargea * chargea )
if (force, chargea, chargea, distance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if distance < 0:
raise ValueError('''Distance cannot be negative''' )
if force == 0:
lowercase : Union[str, Any] =COULOMBS_CONSTANT * charge_product / (distance**2)
return {"force": force}
elif chargea == 0:
lowercase : int =abs(__A ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge1": chargea}
elif chargea == 0:
lowercase : int =abs(__A ) * (distance**2) / (COULOMBS_CONSTANT * chargea)
return {"charge2": chargea}
elif distance == 0:
lowercase : Tuple =(COULOMBS_CONSTANT * charge_product / abs(__A )) ** 0.5
return {"distance": distance}
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
    # Run the module's doctests when executed as a script.
    import doctest
    doctest.testmod()
| 8 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger.
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# Map from checkpoint name to its hosted config URL.
# NOTE(review): this rebinds the same name as the logger above — presumably
# these were ``logger`` and a pretrained-config archive map in the original.
SCREAMING_SNAKE_CASE = {
    'microsoft/swinv2-tiny-patch4-window8-256': (
        'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
    ),
}
class UpperCAmelCase_ ( __A ):
    """Configuration class for a Swin Transformer V2 backbone.

    NOTE(review): the base class was obfuscated to ``__A`` — presumably
    ``PretrainedConfig`` (imported at the top of the file); confirm against
    the original. The obfuscated ``__init__`` declared every parameter with
    the same name (a SyntaxError) and assigned each value to a throwaway
    local instead of ``self``; this restores named parameters and proper
    attribute assignment with the same defaults, in order.
    """

    UpperCamelCase_ = '''swinv2'''
    UpperCamelCase_ = {
        '''num_attention_heads''': '''num_heads''',
        '''num_hidden_layers''': '''num_layers''',
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        """Store the architecture hyper-parameters; extra kwargs go to the base config."""
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 712 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
SCREAMING_SNAKE_CASE = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
SCREAMING_SNAKE_CASE = {
'allenai/led-base-16384': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowercase_ ( ) -> Any:
    """Return a mapping from every byte value (0-255) to a printable unicode char.

    Bytes that are already printable (the three latin-1 ranges below) map to
    themselves; the remaining bytes are shifted to code points >= 256 so the
    byte-level BPE never has to deal with whitespace/control characters.
    The result is cached since it is a pure constant table.
    """
    # Fix: the original bound every intermediate to a throwaway local while
    # the rest of the body read ``bs``/``cs``/``n`` (NameError at runtime).
    bs = (
        list(range(ord('''!''' ) , ord('''~''' ) + 1 ) ) + list(range(ord('''¡''' ) , ord('''¬''' ) + 1 ) ) + list(range(ord('''®''' ) , ord('''ÿ''' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(c ) for c in cs]
    return dict(zip(bs , cs ) )
def lowercase_ ( __A : str ) -> set:
    """Return the set of adjacent symbol pairs in the word ``__A``.

    ``__A`` is any sequence of symbols (a string or a tuple of subword
    strings); the result contains every (previous, next) neighbour pair.
    An empty word yields an empty set.
    """
    # Fix: the original bound its intermediates to a throwaway local while the
    # loop body read ``pairs``/``prev_char`` (NameError at runtime).
    pairs = set()
    if not __A:  # guard: word[0] would raise IndexError on empty input
        return pairs
    prev_char = __A[0]
    for char in __A[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class UpperCAmelCase_ ( __A ):
    """
    Byte-level BPE tokenizer for LED (same scheme as BART/GPT-2): text is
    mapped byte-by-byte to printable unicode, then merged using ranked BPE
    merges loaded from ``merges.txt``; token ids come from ``vocab.json``.

    NOTE(review): an automated rename has replaced many attribute/local
    assignments with bindings to a throwaway variable called ``lowercase``
    while later code still reads the intended names (``self.encoder``,
    ``self.bpe_ranks``, ``pairs`` ...), and every method is named ``A__`` so
    later definitions shadow earlier ones. Several methods cannot run as
    written; the docstrings below describe the evidently intended behaviour.
    """
    # NOTE(review): the next four statements rebind one and the same class
    # attribute; four distinct names (vocab_files_names,
    # pretrained_vocab_files_map, max_model_input_sizes, model_input_names)
    # were evidently intended.
    UpperCamelCase_ = VOCAB_FILES_NAMES
    UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase_ = ['''input_ids''', '''attention_mask''']
    # NOTE(review): every parameter below shares the name ``UpperCAmelCase``
    # (a duplicate-argument SyntaxError). Intended parameters, in order:
    # vocab_file, merges_file, errors, bos_token, eos_token, sep_token,
    # cls_token, unk_token, pad_token, mask_token, add_prefix_space.
    def __init__( self : Tuple , UpperCAmelCase : int , UpperCAmelCase : str , UpperCAmelCase : str="replace" , UpperCAmelCase : int="<s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : Optional[int]="</s>" , UpperCAmelCase : List[Any]="<s>" , UpperCAmelCase : str="<unk>" , UpperCAmelCase : Dict="<pad>" , UpperCAmelCase : Union[str, Any]="<mask>" , UpperCAmelCase : str=False , **UpperCAmelCase : int , ) -> Dict:
        '''Load vocab/merge files and wrap special tokens as AddedToken objects.'''
        lowercase : int =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else bos_token
        lowercase : Union[str, Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else eos_token
        lowercase : str =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else sep_token
        lowercase : Optional[int] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else cls_token
        lowercase : Union[str, Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else unk_token
        lowercase : List[Any] =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        lowercase : Any =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
        super().__init__(
            errors=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , unk_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , **UpperCAmelCase , )
        # NOTE(review): the loads below should populate self.encoder,
        # self.decoder, self.errors, self.byte_encoder, self.byte_decoder,
        # self.bpe_ranks, self.cache and self.add_prefix_space respectively.
        with open(UpperCAmelCase , encoding='''utf-8''' ) as vocab_handle:
            lowercase : str =json.load(UpperCAmelCase )
        lowercase : Optional[int] ={v: k for k, v in self.encoder.items()}
        lowercase : Optional[int] =errors  # how to handle errors in decoding
        lowercase : Tuple =bytes_to_unicode()
        lowercase : int ={v: k for k, v in self.byte_encoder.items()}
        with open(UpperCAmelCase , encoding='''utf-8''' ) as merges_handle:
            lowercase : Union[str, Any] =merges_handle.read().split('''\n''' )[1:-1]
        lowercase : Optional[Any] =[tuple(merge.split() ) for merge in bpe_merges]
        lowercase : Optional[int] =dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) )
        lowercase : Optional[int] ={}
        lowercase : Any =add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        lowercase : str =re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''' )
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def A__ ( self : Union[str, Any] ) -> Optional[int]:
        '''Size of the base BPE vocabulary (excludes added tokens).'''
        return len(self.encoder )
    def A__ ( self : Union[str, Any] ) -> Union[str, Any]:
        '''Full token->id mapping: base encoder merged with added tokens.'''
        return dict(self.encoder , **self.added_tokens_encoder )
    def A__ ( self : int , UpperCAmelCase : str ) -> Optional[Any]:
        '''Apply ranked BPE merges to a token; returns the space-joined subwords.
        NOTE(review): the cache is read but the final result is never written
        back to self.cache as evidently intended.'''
        if token in self.cache:
            return self.cache[token]
        lowercase : List[str] =tuple(UpperCAmelCase )
        lowercase : List[str] =get_pairs(UpperCAmelCase )
        if not pairs:
            return token
        while True:
            # pick the adjacent pair with the lowest merge rank; stop when no
            # remaining pair has a known merge
            lowercase : Tuple =min(UpperCAmelCase , key=lambda UpperCAmelCase : self.bpe_ranks.get(UpperCAmelCase , float('''inf''' ) ) )
            if bigram not in self.bpe_ranks:
                break
            lowercase , lowercase : Optional[int] =bigram
            lowercase : Union[str, Any] =[]
            lowercase : Optional[Any] =0
            while i < len(UpperCAmelCase ):
                try:
                    lowercase : Dict =word.index(UpperCAmelCase , UpperCAmelCase )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    lowercase : Optional[int] =j
                if word[i] == first and i < len(UpperCAmelCase ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            lowercase : List[str] =tuple(UpperCAmelCase )
            lowercase : str =new_word
            if len(UpperCAmelCase ) == 1:
                break
            else:
                lowercase : Optional[Any] =get_pairs(UpperCAmelCase )
        lowercase : Optional[Any] =''' '''.join(UpperCAmelCase )
        lowercase : Union[str, Any] =word
        return word
    def A__ ( self : int , UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
        '''Split text with the GPT-2 regex, byte-encode each piece, then BPE it.'''
        lowercase : Dict =[]
        for token in re.findall(self.pat , UpperCAmelCase ):
            lowercase : Optional[int] =''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''' ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCAmelCase ).split(''' ''' ) )
        return bpe_tokens
    def A__ ( self : Union[str, Any] , UpperCAmelCase : Union[str, Any] ) -> List[str]:
        '''Map a token string to its id (falls back to the unk token id).'''
        return self.encoder.get(UpperCAmelCase , self.encoder.get(self.unk_token ) )
    def A__ ( self : Dict , UpperCAmelCase : Optional[int] ) -> Any:
        '''Map an id back to its token string.'''
        return self.decoder.get(UpperCAmelCase )
    def A__ ( self : List[str] , UpperCAmelCase : List[Any] ) -> Union[str, Any]:
        '''Join tokens and undo the byte-to-unicode mapping back to plain text.'''
        lowercase : str =''''''.join(UpperCAmelCase )
        lowercase : Dict =bytearray([self.byte_decoder[c] for c in text] ).decode('''utf-8''' , errors=self.errors )
        return text
    def A__ ( self : Any , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
        '''Write vocab.json and merges.txt into the target directory; return both paths.'''
        if not os.path.isdir(UpperCAmelCase ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        lowercase : Optional[Any] =os.path.join(
            UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        lowercase : List[Any] =os.path.join(
            UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] )
        with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCAmelCase , ensure_ascii=UpperCAmelCase ) + '''\n''' )
        lowercase : List[str] =0
        with open(UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as writer:
            writer.write('''#version: 0.2\n''' )
            # merges are written in rank order; a gap means the table is corrupt
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCAmelCase : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ''' Please check that the tokenizer is not corrupted!''' )
                    lowercase : Any =token_index
                writer.write(''' '''.join(UpperCAmelCase ) + '''\n''' )
                index += 1
        return vocab_file, merge_file
    def A__ ( self : str , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
        '''Build ``<s> A </s>`` (or ``<s> A </s></s> B </s>`` for pairs).'''
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowercase : Optional[int] =[self.cls_token_id]
        lowercase : List[Any] =[self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep
    def A__ ( self : Optional[int] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None , UpperCAmelCase : bool = False ) -> List[int]:
        '''Return a mask with 1 at special-token positions and 0 elsewhere.'''
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCAmelCase , token_ids_a=UpperCAmelCase , already_has_special_tokens=UpperCAmelCase )
        if token_ids_a is None:
            return [1] + ([0] * len(UpperCAmelCase )) + [1]
        return [1] + ([0] * len(UpperCAmelCase )) + [1, 1] + ([0] * len(UpperCAmelCase )) + [1]
    def A__ ( self : Any , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
        '''All-zero token-type ids: this model does not use segment ids.'''
        lowercase : Dict =[self.sep_token_id]
        lowercase : Optional[int] =[self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
    def A__ ( self : Optional[int] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int]=False , **UpperCAmelCase : Optional[Any] ) -> str:
        '''Optionally prepend a space so the first word tokenizes like mid-text words.'''
        lowercase : Tuple =kwargs.pop('''add_prefix_space''' , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(UpperCAmelCase ) > 0 and not text[0].isspace()):
            lowercase : Union[str, Any] =''' ''' + text
        return (text, kwargs)
    def A__ ( self : Any , UpperCAmelCase : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : Optional[bool] = None , ) -> dict:
        '''Pad via the base class, then pad ``global_attention_mask`` with -1
        to the same length (LED-specific extra input).'''
        lowercase : Optional[int] =super()._pad(
            encoded_inputs=UpperCAmelCase , max_length=UpperCAmelCase , padding_strategy=UpperCAmelCase , pad_to_multiple_of=UpperCAmelCase , return_attention_mask=UpperCAmelCase , )
        # Load from model defaults
        if return_attention_mask is None:
            lowercase : Tuple ='''attention_mask''' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            lowercase : Optional[Any] =encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            lowercase : str =len(encoded_inputs['''global_attention_mask'''] ) != len(UpperCAmelCase )
            if needs_to_be_padded:
                lowercase : Tuple =len(UpperCAmelCase ) - len(encoded_inputs['''global_attention_mask'''] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    lowercase : List[str] =(
                        encoded_inputs['''global_attention_mask'''] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    lowercase : Any =[-1] * difference + encoded_inputs[
                        '''global_attention_mask'''
                    ]
                else:
                    raise ValueError('''Invalid padding strategy:''' + str(self.padding_side ) )
        return encoded_inputs
| 8 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
    """
    Tests for VisionTextDualEncoderProcessor: save/load round-trips, kwarg
    overrides at load time, and delegation to its tokenizer/image processor.

    NOTE(review): an automated rename has replaced many assignments with
    bindings to a throwaway ``lowercase`` variable while later code reads the
    intended names (``vocab_tokens``, ``self.tmpdirname``,
    ``self.vocab_file`` ...); the setUp/helpers cannot run as written.
    """
    def A__ ( self : List[str] ) -> List[str]:
        '''Create a temp dir with a tiny BERT vocab and a ViT image-processor config.'''
        lowercase : Any =tempfile.mkdtemp()
        # fmt: off
        lowercase : Optional[int] =['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
        # fmt: on
        lowercase : List[Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        lowercase : List[Any] ={
            '''do_resize''': True,
            '''size''': {'''height''': 18, '''width''': 18},
            '''do_normalize''': True,
            '''image_mean''': [0.5, 0.5, 0.5],
            '''image_std''': [0.5, 0.5, 0.5],
        }
        # NOTE(review): the join argument below was evidently
        # IMAGE_PROCESSOR_NAME; ``UpperCAmelCase`` is undefined here.
        lowercase : Dict =os.path.join(self.tmpdirname , UpperCAmelCase )
        with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
            json.dump(UpperCAmelCase , UpperCAmelCase )
    def A__ ( self : Any , **UpperCAmelCase : Any ) -> List[Any]:
        '''Load the fixture tokenizer from the temp dir.'''
        return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase )
    def A__ ( self : Tuple , **UpperCAmelCase : Any ) -> Tuple:
        '''Load the fixture image processor from the temp dir.'''
        return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase )
    def A__ ( self : Optional[Any] ) -> Tuple:
        '''Remove the temp dir created in setUp.'''
        shutil.rmtree(self.tmpdirname )
    def A__ ( self : List[str] ) -> int:
        '''Build one random uint8 CHW image and return it as a PIL image list.'''
        lowercase : Optional[int] =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        lowercase : Any =[Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def A__ ( self : Optional[int] ) -> Any:
        '''save_pretrained/from_pretrained round-trip preserves both components.'''
        lowercase : Optional[Any] =self.get_tokenizer()
        lowercase : Optional[int] =self.get_image_processor()
        lowercase : int =VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
        processor.save_pretrained(self.tmpdirname )
        lowercase : Optional[int] =VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor.image_processor , UpperCAmelCase )
    def A__ ( self : Tuple ) -> List[str]:
        '''from_pretrained honours extra kwargs for both sub-components.'''
        lowercase : Optional[Any] =VisionTextDualEncoderProcessor(
            tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        lowercase : Optional[int] =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
        lowercase : str =self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 )
        lowercase : str =VisionTextDualEncoderProcessor.from_pretrained(
            self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCAmelCase , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , UpperCAmelCase )
    def A__ ( self : List[Any] ) -> str:
        '''Image inputs go through the processor identically to the raw image processor.'''
        lowercase : Any =self.get_image_processor()
        lowercase : Optional[int] =self.get_tokenizer()
        lowercase : Union[str, Any] =VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
        lowercase : str =self.prepare_image_inputs()
        lowercase : Any =image_processor(UpperCAmelCase , return_tensors='''np''' )
        lowercase : Union[str, Any] =processor(images=UpperCAmelCase , return_tensors='''np''' )
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def A__ ( self : str ) -> Optional[Any]:
        '''Text inputs go through the processor identically to the raw tokenizer.'''
        lowercase : str =self.get_image_processor()
        lowercase : Optional[Any] =self.get_tokenizer()
        lowercase : Dict =VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
        lowercase : int ='''lower newer'''
        lowercase : List[str] =processor(text=UpperCAmelCase )
        lowercase : Tuple =tokenizer(UpperCAmelCase )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def A__ ( self : int ) -> Tuple:
        '''Text + image call produces the combined key set; empty call raises.'''
        lowercase : Optional[int] =self.get_image_processor()
        lowercase : Dict =self.get_tokenizer()
        lowercase : Dict =VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
        lowercase : Dict ='''lower newer'''
        lowercase : List[Any] =self.prepare_image_inputs()
        lowercase : Dict =processor(text=UpperCAmelCase , images=UpperCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
        # test if it raises when no input is passed
        with self.assertRaises(UpperCAmelCase ):
            processor()
    def A__ ( self : List[str] ) -> Union[str, Any]:
        '''batch_decode delegates to the tokenizer's batch_decode.'''
        lowercase : List[str] =self.get_image_processor()
        lowercase : str =self.get_tokenizer()
        lowercase : str =VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
        lowercase : Optional[Any] =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        lowercase : Tuple =processor.batch_decode(UpperCAmelCase )
        lowercase : Optional[int] =tokenizer.batch_decode(UpperCAmelCase )
        self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
    def A__ ( self : List[Any] ) -> List[str]:
        '''The processor's output keys match its declared model_input_names.'''
        lowercase : Union[str, Any] =self.get_image_processor()
        lowercase : List[str] =self.get_tokenizer()
        lowercase : Tuple =VisionTextDualEncoderProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
        lowercase : Optional[int] ='''lower newer'''
        lowercase : Dict =self.prepare_image_inputs()
        lowercase : Any =processor(text=UpperCAmelCase , images=UpperCAmelCase )
        self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 713 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class UpperCAmelCase_ :
    """
    Polynomial multiplication via the fast Fourier transform.

    Two coefficient lists are zero-padded to a common power-of-two length,
    transformed with an iterative radix-2 DFT using a complex root of unity
    from mpmath, multiplied pointwise, and inverse-transformed to obtain the
    product's coefficients.

    NOTE(review): an automated rename has replaced the attribute/local
    assignments with bindings to a throwaway ``lowercase`` variable while
    later code reads the intended names (``self.polyA``, ``self.polyB``,
    ``next_ncol``, ``new_dft`` ...), and the name-mangled calls
    ``self.__multiply``/``self.__dft`` have no matching method definitions
    (both methods are now named ``A__``). The class cannot run as written;
    docstrings below describe the evidently intended behaviour.
    """
    def __init__( self : List[Any] , UpperCAmelCase : Dict=None , UpperCAmelCase : Optional[int]=None ) -> Optional[Any]:
        '''Normalise both coefficient lists, pad to a power of two, pick the
        FFT root of unity and compute the product.'''
        lowercase : Any =list(poly_a or [0] )[:]
        lowercase : Dict =list(poly_b or [0] )[:]
        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        lowercase : int =len(self.polyA )
        while self.polyB[-1] == 0:
            self.polyB.pop()
        lowercase : List[str] =len(self.polyB )
        # Add 0 to make lengths equal a power of 2
        lowercase : Tuple =int(
            2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
        while len(self.polyA ) < self.c_max_length:
            self.polyA.append(0 )
        while len(self.polyB ) < self.c_max_length:
            self.polyB.append(0 )
        # A complex root used for the fourier transform
        lowercase : Optional[int] =complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
        # The product
        lowercase : str =self.__multiply()
    def A__ ( self : Optional[Any] , UpperCAmelCase : Union[str, Any] ) -> Tuple:
        '''Iterative radix-2 DFT of polyA (which == "A") or polyB; halves the
        column count each pass, combining even/odd halves with root powers.'''
        lowercase : Tuple =[[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
        # Corner case
        if len(UpperCAmelCase ) <= 1:
            return dft[0]
        #
        lowercase : List[Any] =self.c_max_length // 2
        while next_ncol > 0:
            lowercase : str =[[] for i in range(UpperCAmelCase )]
            lowercase : List[str] =self.root**next_ncol
            # First half of next step
            lowercase : Union[str, Any] =1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(UpperCAmelCase ):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Second half of next step
            lowercase : Any =1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(UpperCAmelCase ):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Update
            lowercase : Tuple =new_dft
            lowercase : List[Any] =next_ncol // 2
        return dft[0]
    def A__ ( self : int ) -> str:
        '''Pointwise-multiply the two DFTs, then run the inverse DFT
        (doubling columns each pass) and round to real coefficients.'''
        lowercase : List[Any] =self.__dft('''A''' )
        lowercase : Union[str, Any] =self.__dft('''B''' )
        lowercase : Any =[[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
        del dft_a
        del dft_b
        # Corner Case
        if len(inverce_c[0] ) <= 1:
            return inverce_c[0]
        # Inverse DFT
        lowercase : Tuple =2
        while next_ncol <= self.c_max_length:
            lowercase : Tuple =[[] for i in range(UpperCAmelCase )]
            lowercase : Tuple =self.root ** (next_ncol // 2)
            lowercase : Optional[int] =1
            # First half of next step
            for j in range(self.c_max_length // next_ncol ):
                for i in range(next_ncol // 2 ):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2 )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root) )
                current_root *= root
            # Update
            lowercase : List[Any] =new_inverse_c
            next_ncol *= 2
        # Unpack
        lowercase : List[str] =[round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c
    def __str__( self : Dict ) -> Optional[Any]:
        '''Human-readable dump of both input polynomials and their product.'''
        lowercase : Optional[Any] ='''A = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for coef, i in enumerate(self.polyA[: self.len_A] ) )
        lowercase : List[str] ='''B = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for coef, i in enumerate(self.polyB[: self.len_B] ) )
        lowercase : Optional[Any] ='''A*B = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for coef, i in enumerate(self.product ) )
        return f'{a}\n{b}\n{c}'
# Unit tests
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 8 | 0 |
'''simple docstring'''
import requests
SCREAMING_SNAKE_CASE = 'https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='
def lowercase_ ( __A : str ) -> None:
    """Fetch the current top BBC News stories and print their titles, numbered from 1.

    Args:
        __A: NewsAPI key appended to the query URL.
    """
    # Fix: the original read an undefined ``bbc_news_api_key`` parameter and
    # bound the response to a throwaway local while iterating ``bbc_news_page``.
    bbc_news_page = requests.get(_NEWS_API + __A ).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page['''articles'''] , 1 ):
        print(F'{i}.) {article["title"]}' )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key='<Your BBC News API key goes here>')
| 714 |
'''simple docstring'''
def lowercase_ ( number : int , shift_amount : int ) -> str:
    """Logical left shift: append ``shift_amount`` zero bits to ``number``.

    Returns the Python binary-literal string (with ``0b`` prefix).
    Raises ValueError if either input is negative.
    """
    # Fix: the original declared two parameters both named ``__A`` (a
    # duplicate-argument SyntaxError) while the body read ``number`` and
    # ``shift_amount``, and ``binary_number`` was never assigned.
    if number < 0 or shift_amount < 0:
        raise ValueError('''both inputs must be positive integers''' )
    binary_number = str(bin(number ) )
    binary_number += "0" * shift_amount
    return binary_number
def lowercase_ ( number : int , shift_amount : int ) -> str:
    """Logical right shift: drop the lowest ``shift_amount`` bits of ``number``.

    Returns the result as a ``0b``-prefixed binary string ("0b0" when all
    bits are shifted out). Raises ValueError if either input is negative.
    """
    # Fix: the original declared two parameters both named ``__A`` (a
    # duplicate-argument SyntaxError) and never assigned ``binary_number``.
    if number < 0 or shift_amount < 0:
        raise ValueError('''both inputs must be positive integers''' )
    binary_number = str(bin(number ) )[2:]
    if shift_amount >= len(binary_number ):
        return "0b0"
    shifted_binary_number = binary_number[: len(binary_number ) - shift_amount]
    return "0b" + shifted_binary_number
def lowercase_ ( number : int , shift_amount : int ) -> str:
    """Arithmetic right shift of ``number`` by ``shift_amount`` bits.

    Works on a sign-extended two's-complement string: non-negative numbers
    get a leading ``0``, negative numbers are encoded with a leading ``1``.
    The sign bit is replicated into the vacated high positions. Returns a
    ``0b``-prefixed bit string.
    """
    # Fix: the original declared two parameters both named ``__A`` (a
    # duplicate-argument SyntaxError) and bound intermediates to a throwaway
    # local while later lines read ``binary_number``.
    if number >= 0:  # Get binary representation of positive number
        binary_number = '''0''' + str(bin(number ) ).strip('''-''' )[2:]
    else:  # Get binary (2's complement) representation of negative number
        binary_number_length = len(bin(number )[3:] )  # Find 2's complement of number
        binary_number = bin(abs(number ) - (1 << binary_number_length) )[3:]
        binary_number = (
            '''1''' + '''0''' * (binary_number_length - len(binary_number )) + binary_number
        )
    if shift_amount >= len(binary_number ):
        # every bit shifted out: result is all sign bits
        return "0b" + binary_number[0] * len(binary_number )
    return (
        "0b"
        + binary_number[0] * shift_amount
        + binary_number[: len(binary_number ) - shift_amount]
    )
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 8 | 0 |
'''simple docstring'''
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class UpperCAmelCase_ ( __A ):
    """
    Tests for RealmRetriever: concatenating retrieved evidence blocks with a
    question, locating answer spans inside them, and save/load round-trips of
    the block records.

    NOTE(review): an automated rename has replaced many assignments with
    bindings to a throwaway ``lowercase`` variable while later code reads the
    intended names (``self.tmpdirname``, ``self.num_block_records``,
    ``vocab_tokens``, ``self.vocab_file`` ...); setUp and several tests
    cannot run as written. Docstrings describe the intended behaviour.
    """
    def A__ ( self : Any ) -> str:
        '''Create a temp dir containing a tiny Realm vocab and an (empty)
        block-records directory; record num_block_records = 5.'''
        lowercase : Tuple =tempfile.mkdtemp()
        lowercase : str =5
        # Realm tok
        lowercase : Optional[int] =[
            '''[UNK]''',
            '''[CLS]''',
            '''[SEP]''',
            '''[PAD]''',
            '''[MASK]''',
            '''test''',
            '''question''',
            '''this''',
            '''is''',
            '''the''',
            '''first''',
            '''second''',
            '''third''',
            '''fourth''',
            '''fifth''',
            '''record''',
            '''want''',
            '''##want''',
            '''##ed''',
            '''wa''',
            '''un''',
            '''runn''',
            '''##ing''',
            ''',''',
            '''low''',
            '''lowest''',
        ]
        lowercase : Optional[int] =os.path.join(self.tmpdirname , '''realm_tokenizer''' )
        os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
        lowercase : Optional[Any] =os.path.join(UpperCAmelCase , VOCAB_FILES_NAMES['''vocab_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
            vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
        lowercase : Dict =os.path.join(self.tmpdirname , '''realm_block_records''' )
        os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase )
    def A__ ( self : Any ) -> RealmTokenizer:
        '''Load the fixture tokenizer written by setUp.'''
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''realm_tokenizer''' ) )
    def A__ ( self : Dict ) -> Tuple:
        '''Remove the temp dir created in setUp.'''
        shutil.rmtree(self.tmpdirname )
    def A__ ( self : List[str] ) -> Union[str, Any]:
        '''Return a RealmConfig sized to the fixture block records.'''
        lowercase : Tuple =RealmConfig(num_block_records=self.num_block_records )
        return config
    def A__ ( self : int ) -> Optional[Any]:
        '''Build a tiny two-row HF Dataset (unused placeholder fixture).'''
        lowercase : List[Any] =Dataset.from_dict(
            {
                '''id''': ['''0''', '''1'''],
                '''question''': ['''foo''', '''bar'''],
                '''answers''': [['''Foo''', '''Bar'''], ['''Bar''']],
            } )
        return dataset
    def A__ ( self : Tuple ) -> str:
        '''Six fixture evidence blocks as a numpy array of byte strings.'''
        lowercase : Optional[Any] =np.array(
            [
                b'''This is the first record''',
                b'''This is the second record''',
                b'''This is the third record''',
                b'''This is the fourth record''',
                b'''This is the fifth record''',
                b'''This is a longer longer longer record''',
            ] , dtype=UpperCAmelCase , )
        return block_records
    def A__ ( self : Optional[Any] ) -> Dict:
        '''Assemble a RealmRetriever from the fixture blocks and tokenizer.'''
        lowercase : Optional[Any] =RealmRetriever(
            block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
        return retriever
    def A__ ( self : Optional[int] ) -> Tuple:
        '''Retrieval concatenates [CLS] question [SEP] block [SEP] per retrieved
        id and returns fixed-shape (2, 10) tensors.'''
        lowercase : Union[str, Any] =self.get_config()
        lowercase : Tuple =self.get_dummy_retriever()
        lowercase : int =retriever.tokenizer
        lowercase : Any =np.array([0, 3] , dtype='''long''' )
        lowercase : Optional[int] =tokenizer(['''Test question'''] ).input_ids
        lowercase : Optional[int] =tokenizer(
            ['''the fourth'''] , add_special_tokens=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_attention_mask=UpperCAmelCase , ).input_ids
        lowercase : List[Any] =config.reader_seq_len
        lowercase : Dict =retriever(
            UpperCAmelCase , UpperCAmelCase , answer_ids=UpperCAmelCase , max_length=UpperCAmelCase , return_tensors='''np''' )
        self.assertEqual(len(UpperCAmelCase ) , 2 )
        self.assertEqual(len(UpperCAmelCase ) , 2 )
        self.assertEqual(len(UpperCAmelCase ) , 2 )
        self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
        self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
        self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''first''', '''record''', '''[SEP]'''] , )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ['''[CLS]''', '''test''', '''question''', '''[SEP]''', '''this''', '''is''', '''the''', '''fourth''', '''record''', '''[SEP]'''] , )
    def A__ ( self : Tuple ) -> int:
        '''Answer spans are located per block: a has-answer flag plus start/end
        token positions (-1 padding where absent).'''
        lowercase : List[str] =self.get_config()
        lowercase : Optional[Any] =self.get_dummy_retriever()
        lowercase : int =retriever.tokenizer
        lowercase : Optional[Any] =np.array([0, 3, 5] , dtype='''long''' )
        lowercase : Union[str, Any] =tokenizer(['''Test question'''] ).input_ids
        lowercase : Tuple =tokenizer(
            ['''the fourth''', '''longer longer'''] , add_special_tokens=UpperCAmelCase , return_token_type_ids=UpperCAmelCase , return_attention_mask=UpperCAmelCase , ).input_ids
        lowercase : Tuple =config.reader_seq_len
        lowercase : int =retriever(
            UpperCAmelCase , UpperCAmelCase , answer_ids=UpperCAmelCase , max_length=UpperCAmelCase , return_tensors='''np''' )
        self.assertEqual([False, True, True] , UpperCAmelCase )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , UpperCAmelCase )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , UpperCAmelCase )
    def A__ ( self : str ) -> Optional[int]:
        '''Block records survive save_pretrained/from_pretrained, both from a
        local path and (mocked) from the hub.'''
        lowercase : Optional[int] =self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
        # Test local path
        lowercase : List[Any] =retriever.from_pretrained(os.path.join(self.tmpdirname , '''realm_block_records''' ) )
        self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
        # Test mocked remote path
        with patch('''transformers.models.realm.retrieval_realm.hf_hub_download''' ) as mock_hf_hub_download:
            lowercase : Dict =os.path.join(
                os.path.join(self.tmpdirname , '''realm_block_records''' ) , _REALM_BLOCK_RECORDS_FILENAME )
            lowercase : Dict =RealmRetriever.from_pretrained('''google/realm-cc-news-pretrained-openqa''' )
        self.assertEqual(retriever.block_records[0] , b'''This is the first record''' )
| 715 |
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
# When Pillow is installed, use the real Image class and image loader; otherwise
# install a no-op stand-in so module-level references still resolve at import time.
if is_vision_available():
    from PIL import Image

    from transformers.image_utils import load_image
else:
    class UpperCAmelCase_ :
        """No-op placeholder used when vision dependencies are unavailable."""
        @staticmethod
        def A__ ( *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Optional[int] ) -> Any:
            '''Accept any arguments and do nothing (mirrors an image-opening call).'''
            pass
def lowercase_ ( __A : Union[str, Any] ) -> List[Any]:
    """Fallback image loader: accepts anything and returns None.

    NOTE(review): the tests below call ``load_image``; presumably this stub was
    meant to be named ``load_image`` and to live inside the ``else`` branch above
    (so it does not shadow the real loader when vision is available) -- confirm
    against the upstream test file before renaming.
    """
    return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
# Fix: every test in this module reads this constant as INVOICE_URL (e.g. in the
# pipeline tests below), but it was bound to an obfuscated throwaway name,
# which raised NameError at test time.
INVOICE_URL = (
    'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
    """Pipeline tests for document question answering (LayoutLMv2, LayoutLM, Donut).

    NOTE(review): every method below declares all of its parameters as
    ``UpperCAmelCase`` (duplicate argument names -- a SyntaxError in Python) and
    binds locals to ``lowercase`` while later reading them under their original
    names (``image``, ``question``, ``outputs``, ``dqa_pipeline``, ...). This is
    mechanical obfuscation of the upstream test file; the code cannot run as
    written. Flagged rather than rewritten, since the intended names can only
    partially be recovered from the call sites.
    """
    UpperCamelCase_ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
    @require_pytesseract
    @require_vision
    def A__ ( self : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : List[str] ) -> Tuple:
        '''Build a DQA pipeline plus three example inputs: a URL image, a
        pre-loaded image, and an image accompanied by precomputed word_boxes.'''
        lowercase : Dict =pipeline(
            '''document-question-answering''' , model=UpperCAmelCase , tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
        lowercase : Optional[Any] =INVOICE_URL
        lowercase : Any =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
        lowercase : Dict ='''What is the placebo?'''
        lowercase : Optional[Any] =[
            {
                '''image''': load_image(UpperCAmelCase ),
                '''question''': question,
            },
            {
                '''image''': image,
                '''question''': question,
            },
            {
                '''image''': image,
                '''question''': question,
                '''word_boxes''': word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def A__ ( self : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ) -> Optional[int]:
        '''Run the pipeline on the prepared examples and check that each of the
        three examples yields top_k=2 answers with the expected keys.'''
        lowercase : Dict =dqa_pipeline(UpperCAmelCase , top_k=2 )
        self.assertEqual(
            UpperCAmelCase , [
                [
                    {'''score''': ANY(UpperCAmelCase ), '''answer''': ANY(UpperCAmelCase ), '''start''': ANY(UpperCAmelCase ), '''end''': ANY(UpperCAmelCase )},
                    {'''score''': ANY(UpperCAmelCase ), '''answer''': ANY(UpperCAmelCase ), '''start''': ANY(UpperCAmelCase ), '''end''': ANY(UpperCAmelCase )},
                ]
            ]
            * 3 , )
    @require_torch
    @require_detectrona
    @require_pytesseract
    def A__ ( self : int ) -> Any:
        '''Small tiny-random LayoutLMv2 model: checks scores on the invoice image,
        that a text-free image yields no answers, and that explicit empty
        words/boxes also yield no answers.'''
        lowercase : Dict =pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
        lowercase : Union[str, Any] =INVOICE_URL
        lowercase : Tuple ='''How many cats are there?'''
        lowercase : Optional[int] =[
            {'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
            {'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
        ]
        lowercase : Optional[Any] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase )
        lowercase : List[str] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase )
        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        lowercase : Optional[Any] ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        lowercase : Any =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(UpperCAmelCase , [] )
        # We can optionnally pass directly the words and bounding boxes
        lowercase : int ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
        lowercase : Dict =[]
        lowercase : str =[]
        lowercase : str =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , words=UpperCAmelCase , boxes=UpperCAmelCase , top_k=2 )
        self.assertEqual(UpperCAmelCase , [] )
    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def A__ ( self : int ) -> Any:
        '''Full fine-tuned LayoutLMv2 DocVQA model: checks exact scores/answers
        for keyword-arg, dict, and batched invocation styles.'''
        lowercase : Union[str, Any] =pipeline(
            '''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
        lowercase : Dict =INVOICE_URL
        lowercase : str ='''What is the invoice number?'''
        lowercase : List[str] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        lowercase : List[Any] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        lowercase : List[Any] =dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                [
                    {'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ],
            ]
            * 2 , )
    @slow
    @require_torch
    @require_detectrona
    @require_pytesseract
    def A__ ( self : Dict ) -> List[str]:
        '''Same LayoutLMv2 DocVQA model but with max_seq_len=50, which changes the
        top-ranked answers; checks all three invocation styles again.'''
        lowercase : str =pipeline(
            '''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
        lowercase : Dict =INVOICE_URL
        lowercase : Any ='''What is the invoice number?'''
        lowercase : Union[str, Any] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                {'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        lowercase : int =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                {'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        lowercase : Any =dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                [
                    {'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                    {'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ]
            ]
            * 2 , )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def A__ ( self : str ) -> Dict:
        '''Impira LayoutLM DocVQA model (no detectron2): checks scores for all
        invocation styles, plus passing word_boxes with image=None.'''
        lowercase : Any =AutoTokenizer.from_pretrained(
            '''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase )
        lowercase : Tuple =pipeline(
            '''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase , revision='''3dc6de3''' , )
        lowercase : Tuple =INVOICE_URL
        lowercase : Any ='''What is the invoice number?'''
        lowercase : Dict =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ] , )
        lowercase : Any =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ] , )
        lowercase : str =dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                [
                    {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
                ]
            ]
            * 2 , )
        lowercase : Tuple =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
        # This model should also work if `image` is set to None
        lowercase : Dict =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
            ] , )
    @slow
    @require_torch
    @require_pytesseract
    @require_vision
    def A__ ( self : Dict ) -> Any:
        '''Same Impira LayoutLM model with max_seq_len=50; checks keyword-arg and
        batched styles, plus word_boxes with image=None.'''
        lowercase : Dict =AutoTokenizer.from_pretrained(
            '''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase )
        lowercase : List[Any] =pipeline(
            '''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase , revision='''3dc6de3''' , max_seq_len=50 , )
        lowercase : str =INVOICE_URL
        lowercase : int ='''What is the invoice number?'''
        lowercase : Tuple =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
        lowercase : Union[str, Any] =dqa_pipeline(
            [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                [
                    {'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                    {'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                ]
            ]
            * 2 , )
        lowercase : List[str] =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
        # This model should also work if `image` is set to None
        lowercase : Union[str, Any] =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
        self.assertEqual(
            nested_simplify(UpperCAmelCase , decimals=4 ) , [
                {'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
                {'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
            ] , )
    @slow
    @require_torch
    def A__ ( self : List[Any] ) -> Optional[int]:
        '''Donut DocVQA model (OCR-free, seq2seq): checks the single answer only,
        since Donut does not return scores/spans like the extractive models.'''
        lowercase : str =pipeline(
            '''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
        lowercase : Any =INVOICE_URL
        lowercase : Union[str, Any] ='''What is the invoice number?'''
        lowercase : int =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
        self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , [{'''answer''': '''us-001'''}] )
    @require_tf
    @unittest.skip('''Document question answering not implemented in TF''' )
    def A__ ( self : Any ) -> Any:
        '''Skipped: no TF implementation of this pipeline exists.'''
        pass
| 8 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
# Lazy-import structure for the ERNIE package: maps submodule name -> the public
# names it exports. Fix: the incoming version bound this dict (and the modeling
# list) to throwaway names, so the `_import_structure` passed to `_LazyModule`
# below was never defined (NameError at import time) and the torch models were
# never registered; it also assigned the lazy module to a local instead of
# installing it in sys.modules.
_import_structure = {
    'configuration_ernie': ['ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ErnieConfig', 'ErnieOnnxConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch is missing: simply do not export the modeling objects.
    pass
else:
    # Register the torch-backed modeling objects for lazy import.
    _import_structure['modeling_ernie'] = [
        'ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ErnieForCausalLM',
        'ErnieForMaskedLM',
        'ErnieForMultipleChoice',
        'ErnieForNextSentencePrediction',
        'ErnieForPreTraining',
        'ErnieForQuestionAnswering',
        'ErnieForSequenceClassification',
        'ErnieForTokenClassification',
        'ErnieModel',
        'ErniePreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ernie import (
            ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
            ErnieForCausalLM,
            ErnieForMaskedLM,
            ErnieForMultipleChoice,
            ErnieForNextSentencePrediction,
            ErnieForPreTraining,
            ErnieForQuestionAnswering,
            ErnieForSequenceClassification,
            ErnieForTokenClassification,
            ErnieModel,
            ErniePreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so the heavy torch
    # imports only happen on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 716 |
'''simple docstring'''
def lowercase_ ( number : float , digit_amount : int ) -> float:
    """Isolate the decimal (fractional) part of *number*.

    Fix: the incoming signature declared both parameters as ``__A`` (duplicate
    argument names are a SyntaxError) while the body read ``number`` and
    ``digit_amount``; the parameter names are restored from those body references.

    Args:
        number: value whose fractional part is wanted (sign is preserved).
        digit_amount: decimal digits to keep; if <= 0, the raw fractional
            part is returned unrounded.

    Returns:
        The fractional part of ``number``, rounded to ``digit_amount`` digits
        when ``digit_amount`` is positive.
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 8 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
import diffusers
from diffusers import (
AutoencoderKL,
EulerDiscreteScheduler,
StableDiffusionLatentUpscalePipeline,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
def lowercase_ ( __A : int ) -> Union[str, Any]:
"""simple docstring"""
lowercase : str =[tensor.shape for tensor in tensor_list]
return all(shape == shapes[0] for shape in shapes[1:] )
class UpperCAmelCase_ ( __A , __A , __A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = StableDiffusionLatentUpscalePipeline
UpperCamelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {
'''height''',
'''width''',
'''cross_attention_kwargs''',
'''negative_prompt_embeds''',
'''prompt_embeds''',
}
UpperCamelCase_ = PipelineTesterMixin.required_optional_params - {'''num_images_per_prompt'''}
UpperCamelCase_ = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
UpperCamelCase_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
UpperCamelCase_ = frozenset([] )
UpperCamelCase_ = True
@property
def A__ ( self : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : str =1
lowercase : List[str] =4
lowercase : Union[str, Any] =(16, 16)
lowercase : Dict =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(UpperCAmelCase )
return image
def A__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
lowercase : Optional[int] =UNetaDConditionModel(
act_fn='''gelu''' , attention_head_dim=8 , norm_num_groups=UpperCAmelCase , block_out_channels=[32, 32, 64, 64] , time_cond_proj_dim=160 , conv_in_kernel=1 , conv_out_kernel=1 , cross_attention_dim=32 , down_block_types=(
'''KDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
'''KCrossAttnDownBlock2D''',
) , in_channels=8 , mid_block_type=UpperCAmelCase , only_cross_attention=UpperCAmelCase , out_channels=5 , resnet_time_scale_shift='''scale_shift''' , time_embedding_type='''fourier''' , timestep_post_act='''gelu''' , up_block_types=('''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KCrossAttnUpBlock2D''', '''KUpBlock2D''') , )
lowercase : List[str] =AutoencoderKL(
block_out_channels=[32, 32, 64, 64] , in_channels=3 , out_channels=3 , down_block_types=[
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
'''DownEncoderBlock2D''',
] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
lowercase : Union[str, Any] =EulerDiscreteScheduler(prediction_type='''sample''' )
lowercase : int =CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='''quick_gelu''' , projection_dim=512 , )
lowercase : Dict =CLIPTextModel(UpperCAmelCase )
lowercase : int =CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase : List[Any] ={
'''unet''': model.eval(),
'''vae''': vae.eval(),
'''scheduler''': scheduler,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
}
return components
def A__ ( self : Union[str, Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Optional[int]=0 ) -> List[str]:
'''simple docstring'''
if str(UpperCAmelCase ).startswith('''mps''' ):
lowercase : int =torch.manual_seed(UpperCAmelCase )
else:
lowercase : Union[str, Any] =torch.Generator(device=UpperCAmelCase ).manual_seed(UpperCAmelCase )
lowercase : List[str] ={
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': self.dummy_image.cpu(),
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
def A__ ( self : str ) -> List[Any]:
'''simple docstring'''
lowercase : str ='''cpu'''
lowercase : List[str] =self.get_dummy_components()
lowercase : str =self.pipeline_class(**UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase : List[str] =self.get_dummy_inputs(UpperCAmelCase )
lowercase : int =pipe(**UpperCAmelCase ).images
lowercase : str =image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 256, 256, 3) )
lowercase : Dict =np.array(
[0.4_7_2_2_2_4_1_2, 0.4_1_9_2_1_6_3_3, 0.4_4_7_1_7_4_3_4, 0.4_6_8_7_4_1_9_2, 0.4_2_5_8_8_2_5_8, 0.4_6_1_5_0_7_2_6, 0.4_6_7_7_5_3_4, 0.4_5_5_8_3_8_3_2, 0.4_8_5_7_9_0_5_5] )
lowercase : int =np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase , 1e-3 )
def A__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
super().test_attention_slicing_forward_pass(expected_max_diff=7e-3 )
def A__ ( self : str ) -> Optional[Any]:
'''simple docstring'''
super().test_cpu_offload_forward_pass(expected_max_diff=3e-3 )
def A__ ( self : str ) -> int:
'''simple docstring'''
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def A__ ( self : str ) -> List[str]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=7e-3 )
def A__ ( self : Dict ) -> int:
'''simple docstring'''
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=3e-3 )
def A__ ( self : int ) -> Tuple:
'''simple docstring'''
super().test_save_load_local(expected_max_difference=3e-3 )
def A__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
super().test_save_load_optional_components(expected_max_difference=3e-3 )
def A__ ( self : Dict ) -> Dict:
'''simple docstring'''
lowercase : Dict =[
'''DDIMScheduler''',
'''DDPMScheduler''',
'''PNDMScheduler''',
'''HeunDiscreteScheduler''',
'''EulerAncestralDiscreteScheduler''',
'''KDPM2DiscreteScheduler''',
'''KDPM2AncestralDiscreteScheduler''',
'''DPMSolverSDEScheduler''',
]
lowercase : int =self.get_dummy_components()
lowercase : Dict =self.pipeline_class(**UpperCAmelCase )
# make sure that PNDM does not need warm-up
pipe.scheduler.register_to_config(skip_prk_steps=UpperCAmelCase )
pipe.to(UpperCAmelCase )
pipe.set_progress_bar_config(disable=UpperCAmelCase )
lowercase : int =self.get_dummy_inputs(UpperCAmelCase )
lowercase : int =2
lowercase : Optional[Any] =[]
for scheduler_enum in KarrasDiffusionSchedulers:
if scheduler_enum.name in skip_schedulers:
# no sigma schedulers are not supported
# no schedulers
continue
lowercase : Dict =getattr(UpperCAmelCase , scheduler_enum.name )
lowercase : Tuple =scheduler_cls.from_config(pipe.scheduler.config )
lowercase : Dict =pipe(**UpperCAmelCase )[0]
outputs.append(UpperCAmelCase )
assert check_same_shape(UpperCAmelCase )
@require_torch_gpu
@slow
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def A__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
lowercase : Tuple =torch.manual_seed(33 )
lowercase : Dict =StableDiffusionPipeline.from_pretrained('''CompVis/stable-diffusion-v1-4''' , torch_dtype=torch.floataa )
pipe.to('''cuda''' )
lowercase : List[str] =StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
lowercase : str ='''a photo of an astronaut high resolution, unreal engine, ultra realistic'''
lowercase : Dict =pipe(UpperCAmelCase , generator=UpperCAmelCase , output_type='''latent''' ).images
lowercase : Union[str, Any] =upscaler(
prompt=UpperCAmelCase , image=UpperCAmelCase , num_inference_steps=20 , guidance_scale=0 , generator=UpperCAmelCase , output_type='''np''' , ).images[0]
lowercase : str =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/astronaut_1024.npy''' )
assert np.abs((expected_image - image).mean() ) < 5e-2
def A__ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
lowercase : str =torch.manual_seed(33 )
lowercase : Optional[int] =StableDiffusionLatentUpscalePipeline.from_pretrained(
'''stabilityai/sd-x2-latent-upscaler''' , torch_dtype=torch.floataa )
upscaler.to('''cuda''' )
lowercase : Union[str, Any] ='''the temple of fire by Ross Tran and Gerardo Dottori, oil on canvas'''
lowercase : Union[str, Any] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_512.png''' )
lowercase : Optional[Any] =upscaler(
prompt=UpperCAmelCase , image=UpperCAmelCase , num_inference_steps=20 , guidance_scale=0 , generator=UpperCAmelCase , output_type='''np''' , ).images[0]
lowercase : str =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/latent-upscaler/fire_temple_1024.npy''' )
assert np.abs((expected_image - image).max() ) < 5e-2
| 717 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def normalize_box(box, width: int, height: int):
    """Scale an absolute ``(x0, y0, x1, y1)`` pixel box into the 0-1000
    coordinate grid that LayoutLM-style models expect.

    Fix: the incoming definition declared three parameters all named ``__A``
    (duplicate argument names are a SyntaxError) while the body read ``box``,
    ``width`` and ``height``; names restored from the body and from the
    ``normalize_box(box, image_width, image_height)`` call in apply_tesseract.

    Args:
        box: sequence of four pixel coordinates (x0, y0, x1, y1).
        width: image width in pixels (scales the x coordinates).
        height: image height in pixels (scales the y coordinates).

    Returns:
        A list of four ints, each in [0, 1000] for in-bounds boxes.
    """
    return [
        int(1_0_0_0 * (box[0] / width)),
        int(1_0_0_0 * (box[1] / height)),
        int(1_0_0_0 * (box[2] / width)),
        int(1_0_0_0 * (box[3] / height)),
    ]
def apply_tesseract(image: np.ndarray, lang: Optional[str], tesseract_config: Optional[str]):
    """Run Tesseract OCR on a document image and return the recognized words
    together with their bounding boxes normalized to the 0-1000 grid.

    Fix: the incoming definition declared all three parameters as ``__A``
    (duplicate argument names are a SyntaxError) and reused ``__A`` for several
    distinct values in the body; names restored from the
    ``apply_tesseract(image, ocr_lang, tesseract_config)`` call in the image
    processor below. The coordinate filters also all enumerated the same
    (invalid) name instead of their own coordinate lists.

    Args:
        image: input image as a numpy array (converted via ``to_pil_image``).
        lang: optional Tesseract language code, passed through to pytesseract.
        tesseract_config: extra Tesseract CLI flags, passed through as ``config``.

    Returns:
        ``(words, normalized_boxes)`` -- parallel lists of equal length.
    """
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='dict', config=tesseract_config)
    words, left, top, width, height = data['text'], data['left'], data['top'], data['width'], data['height']

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class UpperCAmelCase_ ( __A ):
    """Image processor for LayoutLM-family document models.

    Resizes, rescales and normalizes document images, and optionally runs
    Tesseract OCR to extract words plus 0-1000-normalized bounding boxes that
    are attached to the returned :class:`BatchFeature`.

    Fix: the incoming class declared every method parameter as ``UpperCAmelCase``
    (duplicate argument names -- a SyntaxError), named all four methods ``A__``
    even though the body calls ``self.resize`` / ``self.rescale`` /
    ``self.normalize``, and never assigned the ``self.*`` attributes that
    ``preprocess`` reads back. Method and parameter names are restored from
    those internal call sites and attribute reads.
    """

    UpperCamelCase_ = ['pixel_values']

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        """Store the default preprocessing configuration.

        ``rescale_value`` is stored as ``self.rescale_factor`` (the name read
        back in ``preprocess``). ``size`` defaults to 224x224.
        """
        super().__init__(**kwargs)
        size = size if size is not None else {'height': 224, 'width': 224}
        size = get_size_dict(size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize ``image`` to ``size['height']`` x ``size['width']``.

        Raises:
            ValueError: if ``size`` lacks the ``height``/``width`` keys.
        """
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size['height'], size['width'])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Rescale pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize ``image`` channel-wise with ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess one image or a batch of images.

        Per-call arguments override the instance defaults set in ``__init__``.
        When ``apply_ocr`` is true, Tesseract words and normalized boxes are
        attached to the returned BatchFeature under ``words`` / ``boxes``.

        Raises:
            ValueError: for invalid image types or missing required arguments.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.')
        if do_resize and size is None:
            raise ValueError('Size must be specified if do_resize is True.')
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.')
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('If do_normalize is True, image_mean and image_std must be specified.')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, 'pytesseract')
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = BatchFeature(data={'pixel_values': images}, tensor_type=return_tensors)

        if apply_ocr:
            # attach the OCR results alongside the pixel values
            data['words'] = words_batch
            data['boxes'] = boxes_batch
        return data
| 8 | 0 |
'''simple docstring'''
import math
import sys
def lowercase_ ( number : int ) -> int:
    """Return the minimum number of perfect squares summing to *number*
    (Lagrange's four-square problem, solved by dynamic programming).

    Fix: the incoming body read undefined names (``number``, ``answers``,
    ``answer``, ``root``) while every assignment went to a throwaway local, so
    the DP table was never filled; names restored from the body's own reads.

    Args:
        number: a non-negative integer (floats must be whole numbers).

    Returns:
        The minimum count of perfect squares summing to ``number``.
        By this implementation's convention, an input of 0 returns 1.

    Raises:
        ValueError: if ``number`` is not a whole number or is negative.
    """
    if number != int(number):
        raise ValueError('the value of input must be a natural number')
    if number < 0:
        raise ValueError('the value of input must not be a negative number')
    if number == 0:
        return 1
    # answers[i] = minimum number of squares that sum to i; base case 0 needs 0.
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1, number + 1):
        answer = sys.maxsize
        root = int(math.sqrt(i))
        for j in range(1, root + 1):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer, current_answer)
        answers[i] = answer
    return answers[number]
# Run the module's doctests when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 718 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCAmelCase_ :
    """Builds a tiny DistilBert config plus random inputs and provides one
    ``create_and_check_*`` helper per TF model head.

    NOTE(review): in the original, every ``__init__`` attribute assignment was
    redirected to a throwaway local and every method was named ``A__`` (each
    def shadowing the previous) — a renaming artifact.  Attribute and method
    names are restored from what the test-case class below actually calls.
    """

    def __init__( self , parent , ):
        self.parent = parent
        # Tiny model hyper-parameters so each check runs in seconds.
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = False
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = '''gelu'''
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.0_2
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs( self ):
        """Return (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def create_and_check_distilbert_model( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """Base model must accept dict and positional-list inputs and emit hidden states."""
        model = TFDistilBertModel(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_distilbert_for_masked_lm( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDistilBertForMaskedLM(config=config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_distilbert_for_question_answering( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFDistilBertForQuestionAnswering(config=config )
        inputs = {
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def create_and_check_distilbert_for_sequence_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDistilBertForSequenceClassification(config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_distilbert_for_multiple_choice( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = TFDistilBertForMultipleChoice(config )
        # Repeat each example once per choice: (batch, seq) -> (batch, choices, seq).
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def create_and_check_distilbert_for_token_classification( self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFDistilBertForTokenClassification(config )
        inputs = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def prepare_config_and_inputs_for_common( self ):
        """Adapter used by the shared TF model test mixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """Common-test wiring for the TF DistilBert model family.

    NOTE(review): the original inherited from an undefined ``__A``, clobbered
    all four class attributes under one name, and never stored
    ``self.model_tester``/``self.config_tester`` — repaired using the mixins
    imported at the top of this file.
    """

    # Attribute names below are the ones TFModelTesterMixin / PipelineTesterMixin read.
    all_model_classes = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': TFDistilBertModel,
            '''fill-mask''': TFDistilBertForMaskedLM,
            '''question-answering''': TFDistilBertForQuestionAnswering,
            '''text-classification''': TFDistilBertForSequenceClassification,
            '''token-classification''': TFDistilBertForTokenClassification,
            '''zero-shot''': TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp( self ):
        # NOTE(review): `TFDistilBertModelTester` is not defined under that name
        # in this file — the tester class above was renamed to `UpperCAmelCase_`;
        # confirm the intended binding.
        self.model_tester = TFDistilBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DistilBertConfig , dim=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_distilbert_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*config_and_inputs )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*config_and_inputs )

    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*config_and_inputs )

    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
            model = TFDistilBertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
    """Slow integration test pinning real output values of the pretrained base model."""

    @slow
    def test_output_embeds_base_model( self ):
        model = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 768]
        # NOTE(review): the original compared against an undefined name;
        # restored to the shape/slice locals defined here.
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
                    [0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
                    [0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1e-4 )
| 8 | 0 |
'''simple docstring'''
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Dict , UpperCAmelCase : Any ) -> Dict:
'''simple docstring'''
lowercase : str =val
lowercase : Tuple =None
lowercase : List[str] =None
def A__ ( self : Any , UpperCAmelCase : str ) -> Union[str, Any]:
'''simple docstring'''
if self.val:
if val < self.val:
if self.left is None:
lowercase : str =Node(UpperCAmelCase )
else:
self.left.insert(UpperCAmelCase )
elif val > self.val:
if self.right is None:
lowercase : List[Any] =Node(UpperCAmelCase )
else:
self.right.insert(UpperCAmelCase )
else:
lowercase : Tuple =val
def lowercase_ ( root , res ) -> None:
    """Append the values of the BST rooted at ``root`` to ``res`` in sorted order.

    Note: the original declared two parameters both named ``__A`` (a
    SyntaxError) and recursed through an undefined ``inorder`` name; the
    parameters are restored from the names the body reads and the recursion
    now targets this function itself.
    """
    if root:
        lowercase_(root.left , res )
        res.append(root.val )
        lowercase_(root.right , res )
def lowercase_ ( arr ):
    """Sort ``arr`` with a binary search tree; duplicate values collapse to one.

    Self-contained rewrite: the original called ``Node`` and ``inorder``,
    neither of which exists under those names in this file (renaming
    artifact), so the BST build and in-order walk are inlined here.
    """
    if len(arr ) == 0:
        return arr

    # Each node is a 3-list: [value, left, right].
    root = [arr[0], None, None]

    def _insert(node , value ):
        # Equal values fall through both branches and are dropped,
        # matching the BST-insert semantics used elsewhere in this file.
        if value < node[0]:
            if node[1] is None:
                node[1] = [value, None, None]
            else:
                _insert(node[1] , value )
        elif value > node[0]:
            if node[2] is None:
                node[2] = [value, None, None]
            else:
                _insert(node[2] , value )

    for item in arr[1:]:
        _insert(root , item )

    res = []

    def _inorder(node ):
        if node is not None:
            _inorder(node[1] )
            res.append(node[0] )
            _inorder(node[2] )

    _inorder(root )
    return res
if __name__ == "__main__":
    # The sort function above is bound to `lowercase_` (renaming artifact);
    # the original called an undefined `tree_sort`.
    print(lowercase_([10, 1, 3, 2, 9, 14, 13]))
| 719 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure: submodule name -> list of public symbols it provides.
# NOTE(review): the original rebound every symbol list to the same name
# (clobbering this dict) and then passed an undefined `_import_structure`
# to _LazyModule; both defects are repaired below.
SCREAMING_SNAKE_CASE = {
    'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE['tokenization_rembert'] = ['RemBertTokenizer']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE['tokenization_rembert_fast'] = ['RemBertTokenizerFast']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE['modeling_rembert'] = [
        'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RemBertForCausalLM',
        'RemBertForMaskedLM',
        'RemBertForMultipleChoice',
        'RemBertForQuestionAnswering',
        'RemBertForSequenceClassification',
        'RemBertForTokenClassification',
        'RemBertLayer',
        'RemBertModel',
        'RemBertPreTrainedModel',
        'load_tf_weights_in_rembert',
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE['modeling_tf_rembert'] = [
        'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFRemBertForCausalLM',
        'TFRemBertForMaskedLM',
        'TFRemBertForMultipleChoice',
        'TFRemBertForQuestionAnswering',
        'TFRemBertForSequenceClassification',
        'TFRemBertForTokenClassification',
        'TFRemBertLayer',
        'TFRemBertModel',
        'TFRemBertPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert import RemBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert_fast import RemBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rembert import (
            REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RemBertForCausalLM,
            RemBertForMaskedLM,
            RemBertForMultipleChoice,
            RemBertForQuestionAnswering,
            RemBertForSequenceClassification,
            RemBertForTokenClassification,
            RemBertLayer,
            RemBertModel,
            RemBertPreTrainedModel,
            load_tf_weights_in_rembert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rembert import (
            TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForMultipleChoice,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertLayer,
            TFRemBertModel,
            TFRemBertPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], SCREAMING_SNAKE_CASE, module_spec=__spec__)
| 8 | 0 |
'''simple docstring'''
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
# Contents and relative name of the temporary fixture file used by the tests below.
# NOTE(review): the original bound both values to one name, clobbering the first;
# the fixtures read FILE_PATH / FILE_CONTENT, so those names are restored.
FILE_CONTENT = '\\n Text data.\n Second line of data.'
FILE_PATH = 'file'
SCREAMING_SNAKE_CASE = FILE_PATH
@pytest.fixture(scope='''session''' )
def lowercase_ ( tmp_path_factory ):
    """Session fixture: write FILE_CONTENT zstd-compressed and return its path.

    NOTE(review): the original compressed the fixture object itself instead of
    FILE_CONTENT (renaming artifact); repaired here.
    """
    path = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
    data = bytes(FILE_CONTENT , '''utf-8''' )
    with zstd.open(path , '''wb''' ) as f:
        f.write(data )
    return path
@pytest.fixture
def lowercase_ ( tmpfs ):
    """Fixture: create FILE_PATH with FILE_CONTENT on the mock filesystem.

    NOTE(review): the original read an undefined ``tmpfs`` name and used its
    single parameter as both path and content; parameter restored from the
    ``tmpfs.local_root_dir`` usage in the body.
    """
    with open(os.path.join(tmpfs.local_root_dir , FILE_PATH ) , '''w''' ) as f:
        f.write(FILE_CONTENT )
    return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def lowercase_ ( compression_format , gz_file , xz_file , zstd_path , text_file , tmp_path ):
    """`cached_path` with extract_compressed_file=True must yield the original text.

    NOTE(review): the original declared six parameters all named ``__A`` (a
    SyntaxError); names restored from how the body uses each fixture.
    """
    input_paths = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
    input_path = input_paths[compression_format]
    cache_dir = tmp_path / '''cache'''
    download_config = DownloadConfig(cache_dir=cache_dir , extract_compressed_file=True )
    extracted_path = cached_path(input_path , download_config=download_config )
    with open(extracted_path ) as f:
        extracted_file_content = f.read()
    with open(text_file ) as f:
        expected_file_content = f.read()
    assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def lowercase_ ( default_extracted , default_cache_dir , xz_file , tmp_path , monkeypatch ):
    """Extraction directory must honor the default/custom cache and extract settings.

    NOTE(review): five parameters were all named ``__A`` (SyntaxError);
    names restored from the body's fixture usage.
    """
    custom_cache_dir = '''custom_cache'''
    custom_extracted_dir = '''custom_extracted_dir'''
    custom_extracted_path = tmp_path / '''custom_extracted_path'''
    if default_extracted:
        expected = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
    else:
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , custom_extracted_dir )
        monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(custom_extracted_path ) )
        expected = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
    filename = xz_file
    download_config = (
        DownloadConfig(extract_compressed_file=True )
        if default_cache_dir
        else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=True )
    )
    extracted_file_path = cached_path(filename , download_config=download_config )
    assert Path(extracted_file_path ).parent.parts[-2:] == expected
def lowercase_ ( text_file ):
    """`cached_path` must return existing local files unchanged (absolute and relative).

    NOTE(review): the body compared against an undefined ``text_file`` name;
    the parameter is restored accordingly.
    """
    # absolute path
    text_file_abs = str(Path(text_file ).resolve() )
    assert cached_path(text_file_abs ) == text_file
    # relative path
    text_file_rel = str(Path(text_file ).resolve().relative_to(Path(os.getcwd() ) ) )
    assert cached_path(text_file_rel ) == text_file
def lowercase_ ( tmp_path ):
    """`cached_path` must raise FileNotFoundError for missing local files.

    NOTE(review): the original read an undefined ``tmp_path`` and passed a
    path object to ``pytest.raises``; the expected exception type is restored.
    """
    missing_file = str(tmp_path.resolve() / '''__missing_file__.txt''' )
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
    # relative path
    missing_file = '''./__missing_file__.txt'''
    with pytest.raises(FileNotFoundError ):
        cached_path(missing_file )
def lowercase_ ( tmpfs_file ):
    """Files on the mock `tmp://` filesystem are fetched through `get_from_cache`.

    NOTE(review): the body read an undefined ``tmpfs_file`` name; the
    parameter is restored accordingly.
    """
    output_file = get_from_cache(F'tmp://{tmpfs_file}' )
    with open(output_file ) as f:
        output_file_content = f.read()
    assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , True )
def lowercase_ ( ):
    """With offline mode forced on, `cached_path` on a URL must raise.

    NOTE(review): the decorator and ``pytest.raises`` both received an
    undefined ``__A``; restored to ``True`` / ``OfflineModeIsEnabled``
    (imported at the top of this file).
    """
    with pytest.raises(OfflineModeIsEnabled ):
        cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , True )
def lowercase_ ( tmp_path_factory ):
    """With offline mode forced on, http_get/http_head must raise OfflineModeIsEnabled."""
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled ):
        http_get('''https://huggingface.co''' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , True )
def lowercase_ ( tmp_path_factory ):
    """With offline mode forced on, ftp_get/ftp_head must raise OfflineModeIsEnabled."""
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_get('''ftp://huggingface.co''' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , True )
def lowercase_ ( tmp_path_factory ):
    """With offline mode forced on, fsspec_get/fsspec_head must raise OfflineModeIsEnabled."""
    filename = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_get('''s3://huggingface.co''' , temp_file=filename )
    with pytest.raises(OfflineModeIsEnabled ):
        fsspec_head('''s3://huggingface.co''' )
| 720 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
# Parsed version of the locally installed torch distribution, resolved once at import time.
SCREAMING_SNAKE_CASE = parse(importlib.metadata.version('torch'))
def lowercase_ ( library_or_version : Union[str, Version] , operation : str , requirement_version : str ):
    """Compare a library's installed version (or a given Version) against a requirement.

    Args:
        library_or_version: a package name (its installed version is looked up)
            or an already-parsed ``Version``.
        operation: a key of ``STR_OPERATION_TO_FUNC`` (e.g. ``">="``).
        requirement_version: version string to compare against.

    Raises:
        ValueError: if ``operation`` is not a supported comparison.

    Note: the original declared three parameters all named ``__A`` (a
    SyntaxError); names restored from the standard compare-versions pattern.
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(F'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}' )
    operation_func = STR_OPERATION_TO_FUNC[operation]
    if isinstance(library_or_version , str ):
        library_or_version = parse(importlib.metadata.version(library_or_version ) )
    return operation_func(library_or_version , parse(requirement_version ) )
def lowercase_ ( operation : str , version : str ):
    """Compare the installed torch version against ``version`` using ``operation``.

    NOTE(review): the original forwarded to a sibling comparator that this
    module's renamed code no longer exposes; the comparison is inlined here
    against the module-level parsed torch version.
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(F'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}' )
    return STR_OPERATION_TO_FUNC[operation](SCREAMING_SNAKE_CASE , parse(version ) )
| 8 | 0 |
'''simple docstring'''
def lowercase_ ( length : int = 5_0 ) -> int:
    """Count the ways to fill a row of ``length`` units with tiles of length 2, 3 or 4
    (plus unit gaps), by dynamic programming.

    Note: the original body read an undefined ``length`` name (the parameter
    had been renamed); restored here.
    """
    # ways_number[i] starts at 1 (the all-empty row) and accumulates tilings.
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
if __name__ == "__main__":
    # The solver above is bound to `lowercase_` (renaming artifact); the
    # original printed an undefined `solution`.
    print(f"""{lowercase_() = }""")
| 721 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
SCREAMING_SNAKE_CASE = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def lowercase_ ( __A : str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
"""simple docstring"""
lowercase : List[Any] =BeautifulSoup(requests.get(url + location ).content , '''html.parser''' )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
lowercase : List[str] =job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
lowercase : Union[str, Any] =job.find('''span''' , {'''class''': '''company'''} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('Bangalore'), 1):
print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 8 | 0 |
'''simple docstring'''
def lowercase_ ( pence : int = 2_0_0 ) -> int:
    """Count the ways to make ``pence`` using UK coins 1p..200p (Project Euler 31).

    Classic unbounded-knapsack DP.  Note: the original body read an undefined
    ``pence`` name and lost the ``number_of_ways[0] = 1`` base case into a
    throwaway local; both are repaired here.
    """
    coins = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 2_0_0]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence
    for coin in coins:
        for i in range(coin , pence + 1 , 1 ):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
    # The solver above is bound to `lowercase_` (renaming artifact); the
    # original asserted on an undefined `solution`.
    assert lowercase_(200) == 73_682
| 700 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Lazy-import structure: submodule name -> public symbols.
# NOTE(review): the original rebound the structure under one name (clobbering
# the dict) and passed an undefined `_import_structure` to _LazyModule; also
# the declared symbol names (M2M100*) differ from the names actually imported
# under TYPE_CHECKING (MaMaaa* from `.configuration_mam_aaa`) — confirm which
# naming is intended against the real submodules.
SCREAMING_SNAKE_CASE = {
    'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
    'tokenization_m2m_100': ['M2M100Tokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE['modeling_m2m_100'] = [
        'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
        'M2M100ForConditionalGeneration',
        'M2M100Model',
        'M2M100PreTrainedModel',
    ]


if TYPE_CHECKING:
    from .configuration_mam_aaa import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, MaMaaaConfig, MaMaaaOnnxConfig
    from .tokenization_mam_aaa import MaMaaaTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mam_aaa import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaMaaaForConditionalGeneration,
            MaMaaaModel,
            MaMaaaPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so torch loads on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], SCREAMING_SNAKE_CASE, module_spec=__spec__)
| 8 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def lowercase_ ( model_name ):
    """Build a SwinConfig (image size 192) for a SimMIM 'base' or 'large' variant.

    Raises:
        ValueError: for any other model name.

    Note: the original read an undefined ``model_name`` and discarded every
    config/hyper-parameter assignment into throwaway locals; repaired here.
    """
    config = SwinConfig(image_size=1_9_2 )
    if "base" in model_name:
        window_size = 6
        embed_dim = 1_2_8
        depths = (2, 2, 1_8, 2)
        num_heads = (4, 8, 1_6, 3_2)
    elif "large" in model_name:
        window_size = 1_2
        embed_dim = 1_9_2
        depths = (2, 2, 1_8, 2)
        num_heads = (6, 1_2, 2_4, 4_8)
    else:
        raise ValueError('''Model not supported, only supports base and large variants''' )
    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    return config
def lowercase_ ( name ):
    """Map an original SimMIM checkpoint key to its HF SwinForMaskedImageModeling name.

    Encoder keys are rewritten and prefixed with ``swin.``; decoder keys pass
    through unchanged.  Note: the original read an undefined ``name`` and
    discarded every rewrite into throwaway locals; repaired here.
    """
    if "encoder.mask_token" in name:
        name = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
    if "encoder.patch_embed.proj" in name:
        name = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
    if "encoder.patch_embed.norm" in name:
        name = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
    if "attn.proj" in name:
        name = name.replace('''attn.proj''' , '''attention.output.dense''' )
    if "attn" in name:
        name = name.replace('''attn''' , '''attention.self''' )
    if "norm1" in name:
        name = name.replace('''norm1''' , '''layernorm_before''' )
    if "norm2" in name:
        name = name.replace('''norm2''' , '''layernorm_after''' )
    if "mlp.fc1" in name:
        name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
    if "mlp.fc2" in name:
        name = name.replace('''mlp.fc2''' , '''output.dense''' )
    if name == "encoder.norm.weight":
        name = '''layernorm.weight'''
    if name == "encoder.norm.bias":
        name = '''layernorm.bias'''
    if "decoder" in name:
        pass
    else:
        name = '''swin.''' + name
    return name
def lowercase_ ( orig_state_dict , model ):
    """Rewrite a SimMIM state dict in place to HF SwinForMaskedImageModeling keys.

    Fused qkv tensors are split into separate query/key/value entries using
    the model's per-block ``all_head_size``; ``attn_mask`` buffers are dropped.

    Note: the original declared two parameters both named ``__A`` (a
    SyntaxError) and discarded every re-keyed store into throwaway locals;
    the key renaming is inlined below so this function is self-contained.
    """

    def _rename(name ):
        # Same mapping as the standalone rename helper in this file.
        if "encoder.mask_token" in name:
            name = name.replace('''encoder.mask_token''' , '''embeddings.mask_token''' )
        if "encoder.patch_embed.proj" in name:
            name = name.replace('''encoder.patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
        if "encoder.patch_embed.norm" in name:
            name = name.replace('''encoder.patch_embed.norm''' , '''embeddings.norm''' )
        if "attn.proj" in name:
            name = name.replace('''attn.proj''' , '''attention.output.dense''' )
        if "attn" in name:
            name = name.replace('''attn''' , '''attention.self''' )
        if "norm1" in name:
            name = name.replace('''norm1''' , '''layernorm_before''' )
        if "norm2" in name:
            name = name.replace('''norm2''' , '''layernorm_after''' )
        if "mlp.fc1" in name:
            name = name.replace('''mlp.fc1''' , '''intermediate.dense''' )
        if "mlp.fc2" in name:
            name = name.replace('''mlp.fc2''' , '''output.dense''' )
        if name == "encoder.norm.weight":
            name = '''layernorm.weight'''
        if name == "encoder.norm.bias":
            name = '''layernorm.bias'''
        if "decoder" not in name:
            name = '''swin.''' + name
        return name

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if "attn_mask" in key:
            pass  # relative-attention masks are recomputed by the HF model
        elif "qkv" in key:
            key_split = key.split('''.''' )
            layer_num = int(key_split[2] )
            block_num = int(key_split[4] )
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
            prefix = F'swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self'
            if "weight" in key:
                orig_state_dict[F'{prefix}.query.weight'] = val[:dim, :]
                orig_state_dict[F'{prefix}.key.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[F'{prefix}.value.weight'] = val[-dim:, :]
            else:
                orig_state_dict[F'{prefix}.query.bias'] = val[:dim]
                orig_state_dict[F'{prefix}.key.bias'] = val[dim : dim * 2]
                orig_state_dict[F'{prefix}.value.bias'] = val[-dim:]
        else:
            orig_state_dict[_rename(key )] = val
    return orig_state_dict
def lowercase_ ( model_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub ):
    """Convert a SimMIM Swin checkpoint to HF format, sanity-check it on one image,
    and optionally save/push the result.

    Parameter names are restored from the argparse call at the bottom of this
    file; the original declared four parameters all named ``__A`` (SyntaxError).
    """
    state_dict = torch.load(checkpoint_path , map_location='''cpu''' )['''model''']
    # NOTE(review): `get_swin_config` / `convert_state_dict` are not defined
    # under those names in this file (the helpers above were renamed to
    # `lowercase_`); confirm the intended bindings.
    config = get_swin_config(model_name )
    model = SwinForMaskedImageModeling(config )
    model.eval()
    new_state_dict = convert_state_dict(state_dict , model )
    model.load_state_dict(new_state_dict )
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    image_processor = ViTImageProcessor(size={'''height''': 1_9_2, '''width''': 1_9_2} )
    image = Image.open(requests.get(url , stream=True ).raw )
    inputs = image_processor(images=image , return_tensors='''pt''' )
    with torch.no_grad():
        outputs = model(**inputs )
    print(outputs.keys() )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
        model.save_pretrained(pytorch_dump_folder_path )
        print(F'Saving image processor to {pytorch_dump_folder_path}' )
        image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        print(F'Pushing model and image processor for {model_name} to hub' )
        model.push_to_hub(F'microsoft/{model_name}' )
        image_processor.push_to_hub(F'microsoft/{model_name}' )
if __name__ == "__main__":
    # NOTE(review): the original bound the parser and parsed args to a
    # throwaway name and then read undefined `parser`/`args`; repaired here.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='swin-base-simmim-window6-192',
        type=str,
        choices=['swin-base-simmim-window6-192', 'swin-large-simmim-window12-192'],
        help='Name of the Swin SimMIM model you\'d like to convert.',
    )
    parser.add_argument(
        '--checkpoint_path',
        default='/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth',
        type=str,
        help='Path to the original PyTorch checkpoint (.pth file).',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )
    args = parser.parse_args()
    # NOTE(review): `convert_swin_checkpoint` is not defined under that name in
    # this file (the converter was renamed to `lowercase_`); confirm the target.
    convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 701 |
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)


# Lazy-import structure: submodule name -> public symbols.
# NOTE(review): the original rebound the structure (clobbering the dict) and
# passed an undefined `_import_structure` to _LazyModule; repaired below.
SCREAMING_SNAKE_CASE = {
    'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE['modeling_mega'] = [
        'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MegaForCausalLM',
        'MegaForMaskedLM',
        'MegaForMultipleChoice',
        'MegaForQuestionAnswering',
        'MegaForSequenceClassification',
        'MegaForTokenClassification',
        'MegaModel',
        'MegaPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so torch loads on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], SCREAMING_SNAKE_CASE, module_spec=__spec__)
| 8 | 0 |
'''simple docstring'''
import random
def lowercase_ ( num : int ) -> bool:
    """Rabin–Miller probabilistic primality test with 5 random rounds.

    Returns True for every odd prime; returns False for composites with
    overwhelming probability.  Expects ``num`` odd and > 3 — callers in this
    file trial-divide by small primes first.

    Note: the original read an undefined ``num`` and discarded the ``s``/``t``
    decomposition into throwaway locals; repaired here.
    """
    # Write num - 1 as s * 2**t with s odd.
    s = num - 1
    t = 0
    while s % 2 == 0:
        s = s // 2
        t += 1
    for _ in range(5 ):
        a = random.randrange(2 , num - 1 )
        v = pow(a , s , num )
        if v != 1:
            i = 0
            while v != (num - 1):
                if i == t - 1:
                    return False
                else:
                    i = i + 1
                    v = (v**2) % num
    return True
def lowercase_ ( num : int ) -> bool:
    """Primality test: membership/trial division by all primes below 1000,
    then a Rabin–Miller check for survivors.

    Note: the original read an undefined ``num`` name (the parameter had been
    renamed); restored here.  The prime table is the original one, reformatted.
    """
    if num < 2:
        return False
    # All 168 primes below 1000.
    low_primes = [
        2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61,
        67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137,
        139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211,
        223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283,
        293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379,
        383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461,
        463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563,
        569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643,
        647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739,
        743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829,
        839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937,
        941, 947, 953, 967, 971, 977, 983, 991, 997,
    ]
    if num in low_primes:
        return True
    for prime in low_primes:
        if (num % prime) == 0:
            return False
    # NOTE(review): `rabin_miller` is not defined under that name in this file
    # (the probabilistic test above was renamed to `lowercase_` and is later
    # shadowed); confirm the intended target of this call.
    return rabin_miller(num )
def lowercase_ ( keysize : int = 1_0_2_4 ) -> int:
    """Generate a random probable prime of ``keysize`` bits.

    Note: the original read an undefined ``keysize`` (the parameter had been
    renamed); restored here.
    """
    while True:
        num = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
        # NOTE(review): `is_prime_low_num` is not defined under that name in
        # this file (the checker above was renamed to `lowercase_`); confirm
        # the intended target of this call.
        if is_prime_low_num(num ):
            return num
if __name__ == "__main__":
    # The generator above is bound to `lowercase_`; the original stored the
    # result in a throwaway name and then printed an undefined `num`.
    num = lowercase_()
    print(('Prime number:', num))
    # NOTE(review): `is_prime_low_num` is not defined under that name here
    # (renaming artifact); confirm the intended target.
    print(('is_prime_low_num:', is_prime_low_num(num)))
| 702 |
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)

# Restored distinct names: the tokenizer class below references these four
# constants, but the original bound every value to the same clobbered name.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
    },
    'merges_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'allegro/herbert-base-cased': 514}
PRETRAINED_INIT_CONFIGURATION = {}
class UpperCAmelCase_ ( PreTrainedTokenizerFast ):
    """Fast (Rust-backed) tokenizer for HerBERT.

    Mirrors the slow :class:`HerbertTokenizer` and adds HerBERT's
    special-token layout: ``<s> A </s>`` for single sequences and
    ``<s> A </s> B </s>`` for pairs.

    NOTE(review): the original declared the base class as the undefined name
    ``__A``, bound every class attribute to the same name ``UpperCamelCase_``,
    gave every method the name ``A__`` (later defs shadowed earlier ones) and
    reused one parameter name per signature (a SyntaxError). The canonical
    ``PreTrainedTokenizerFast`` hook names are restored here.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , sep_token="</s>" , **kwargs ):
        """Forward the vocab/merges/tokenizer files and special tokens to the base class."""
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , sep_token=sep_token , **kwargs , )

    def build_inputs_with_special_tokens( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        """Wrap one or two sequences in HerBERT special tokens."""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_b is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_b + sep

    def get_special_tokens_mask( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None , already_has_special_tokens : bool = False ) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a , token_ids_1=token_ids_b , already_has_special_tokens=True )
        if token_ids_b is None:
            return [1] + ([0] * len(token_ids_a)) + [1]
        return [1] + ([0] * len(token_ids_a)) + [1] + ([0] * len(token_ids_b)) + [1]

    def create_token_type_ids_from_sequences( self , token_ids_a : List[int] , token_ids_b : Optional[List[int]] = None ) -> List[int]:
        """0s for the first sequence (plus its specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]

    def save_vocabulary( self , save_directory : str , filename_prefix : Optional[str] = None ) -> Tuple[str]:
        """Persist the underlying tokenizer model files; return their paths."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
| 8 | 0 |
'''simple docstring'''
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
SCREAMING_SNAKE_CASE = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
def lowercase_ ( __A : str=None ) -> List[str]:
    """Build the argument parser for ``accelerate tpu-config``.

    ``__A`` is an optional argparse sub-parsers action: when given, the
    command is registered on it, otherwise a standalone parser is created.
    The original body referenced the undefined name ``subparsers`` instead of
    its own parameter, and passed throwaway objects as ``type=``/``default=``.
    """
    if __A is not None:
        parser = __A.add_parser('''tpu-config''' , description=_description )
    else:
        parser = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        '''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
    config_args.add_argument(
        '''--config_file''' , type=str , default=None , help='''Path to the config file to use for accelerate.''' , )
    config_args.add_argument(
        '''--tpu_name''' , default=None , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
    config_args.add_argument(
        '''--tpu_zone''' , default=None , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
    pod_args = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options ran inside the TPU.''' )
    pod_args.add_argument(
        '''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
    pod_args.add_argument(
        '''--command_file''' , default=None , help='''The path to the file containing the commands to run on the pod on startup.''' , )
    pod_args.add_argument(
        '''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
    pod_args.add_argument(
        '''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
    pod_args.add_argument(
        '''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
    pod_args.add_argument(
        '''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
    if __A is not None:
        # NOTE(review): the original stored ``func=__A`` (the sub-parsers
        # action itself); presumably the launcher function was intended, but
        # every function in this module shares the name ``lowercase_`` so it
        # cannot be referenced unambiguously. Left as-is — TODO confirm.
        parser.set_defaults(func=__A )
    return parser
def lowercase_ ( __A : Dict ) -> List[str]:
    """Run the configured command(s) on a TPU pod via ``gcloud ... ssh``.

    ``__A`` is the parsed argparse namespace. Missing CLI values are filled
    in from the accelerate config file. The original body computed every
    fall-back into a throwaway local instead of writing it back to ``args``,
    tested ``os.path.isfile`` on the namespace, and passed the namespace to
    ``" ".join``/``subprocess.run``.
    """
    args = __A
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    # Resolve the requested accelerate version to a pip spec.
    if args.accelerate_version == "dev":
        args.accelerate_version = '''git+https://github.com/huggingface/accelerate.git'''
    elif args.accelerate_version == "latest":
        args.accelerate_version = '''accelerate -U'''
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = F'accelerate=={args.accelerate_version}'
    if not args.command_file and not args.command:
        raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
    if args.command_file:
        with open(args.command_file , '''r''' ) as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ['''cd /usr/share''']
    if args.install_accelerate:
        new_cmd += [F'pip install {args.accelerate_version}']
    new_cmd += args.command
    args.command = '''; '''.join(new_cmd )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['''gcloud''']
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(F'Running {" ".join(cmd )}' )
        return
    subprocess.run(cmd )
    print('''Successfully setup pod.''' )
def lowercase_ ( ) -> Any:
    """CLI entry point: build the parser, parse argv and launch on the pod.

    NOTE(review): ``tpu_command_parser``, ``parser``, ``tpu_command_launcher``
    and ``__A`` are all undefined here — the functions above are also named
    ``lowercase_`` (and this definition shadows them) and the parse result is
    bound to a throwaway local — so calling this raises NameError as written.
    """
    lowercase : str =tpu_command_parser()
    lowercase : Any =parser.parse_args()
    tpu_command_launcher(__A )
| 703 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class UpperCAmelCase_ ( __A ):
    """Offline-mode regression tests.

    Each test runs a small python script in a subprocess — optionally with
    ``socket.socket`` monkey-patched to raise — and asserts that cached
    models/tokenizers/pipelines still load (or fail with the expected error).

    NOTE(review): this file is machine-mangled. The base class ``__A`` is
    undefined at module level (presumably ``TestCasePlus``), every test
    method shares the name ``A__`` so later definitions shadow earlier ones,
    the assembled command lists / env dicts / subprocess results are bound to
    throwaway ``lowercase`` locals while the assertions read ``result``, and
    the bare ``UpperCAmelCase`` arguments below are unbound (presumably the
    model name or True/False flags). Confirm against the upstream offline
    test module before relying on any of these tests.
    """

    @require_torch
    def A__ ( self : Tuple ) -> Optional[int]:
        '''Cached BERT artifacts must load with TRANSFORMERS_OFFLINE=1 and sockets disabled.'''
        lowercase : Any ='''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
        lowercase : Optional[int] ='''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
        lowercase : Any ='''
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")
socket.socket = offline_socket
'''
        # Force fetching the files so that we can use the cache
        lowercase : Tuple ='''hf-internal-testing/tiny-random-bert'''
        BertConfig.from_pretrained(UpperCAmelCase )
        BertModel.from_pretrained(UpperCAmelCase )
        BertTokenizer.from_pretrained(UpperCAmelCase )
        pipeline(task='''fill-mask''' , model=UpperCAmelCase )
        # baseline - just load from_pretrained with normal network
        lowercase : List[str] =[sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
        # should succeed
        lowercase : Tuple =self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        lowercase : Optional[Any] ='''1'''
        lowercase : Any =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )

    @require_torch
    def A__ ( self : str ) -> List[str]:
        '''Cached artifacts must load when the network is merely flaky (socket.error).'''
        lowercase : str ='''
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
'''
        lowercase : Optional[Any] ='''
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
'''
        lowercase : Optional[int] ='''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
'''
        # Force fetching the files so that we can use the cache
        lowercase : Optional[Any] ='''hf-internal-testing/tiny-random-bert'''
        BertConfig.from_pretrained(UpperCAmelCase )
        BertModel.from_pretrained(UpperCAmelCase )
        BertTokenizer.from_pretrained(UpperCAmelCase )
        pipeline(task='''fill-mask''' , model=UpperCAmelCase )
        # baseline - just load from_pretrained with normal network
        lowercase : Optional[Any] =[sys.executable, '''-c''', '''\n'''.join([load, run, mock] )]
        # should succeed
        lowercase : str =self.get_env()
        lowercase : Any =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )

    @require_torch
    def A__ ( self : Any ) -> Optional[Any]:
        '''Sharded checkpoints must also load from the cache in offline mode.'''
        lowercase : Optional[Any] ='''
from transformers import BertConfig, BertModel, BertTokenizer
'''
        lowercase : List[Any] ='''
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
'''
        lowercase : int ='''
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
'''
        # baseline - just load from_pretrained with normal network
        lowercase : Tuple =[sys.executable, '''-c''', '''\n'''.join([load, run] )]
        # should succeed
        lowercase : Optional[Any] =self.get_env()
        lowercase : List[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )
        # next emulate no network
        lowercase : Tuple =[sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        lowercase : Any ='''1'''
        lowercase : Optional[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )

    @require_torch
    def A__ ( self : Tuple ) -> Optional[int]:
        '''pipeline() without an explicit task must fail clearly in offline mode.'''
        lowercase : Optional[int] ='''
from transformers import pipeline
'''
        lowercase : List[Any] ='''
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
'''
        lowercase : Tuple ='''
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
'''
        lowercase : Tuple =self.get_env()
        lowercase : Optional[int] ='''1'''
        lowercase : Union[str, Any] =[sys.executable, '''-c''', '''\n'''.join([load, mock, run] )]
        lowercase : Dict =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
        self.assertEqual(result.returncode , 1 , result.stderr )
        self.assertIn(
            '''You cannot infer task automatically within `pipeline` when using offline mode''' , result.stderr.decode().replace('''\n''' , '''''' ) , )

    @require_torch
    def A__ ( self : int ) -> Optional[int]:
        '''trust_remote_code models cached locally must load with TRANSFORMERS_OFFLINE=1.'''
        lowercase : List[str] ='''
from transformers import AutoModel
'''
        lowercase : Dict ='''
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
'''
        # baseline - just load from_pretrained with normal network
        lowercase : Dict =[sys.executable, '''-c''', '''\n'''.join([load, run] )]
        # should succeed
        lowercase : Optional[Any] =self.get_env()
        lowercase : int =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        lowercase : List[str] ='''1'''
        lowercase : List[Any] =subprocess.run(UpperCAmelCase , env=UpperCAmelCase , check=UpperCAmelCase , capture_output=UpperCAmelCase )
        self.assertEqual(result.returncode , 0 , result.stderr )
        self.assertIn('''success''' , result.stdout.decode() )
| 8 | 0 |
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
SCREAMING_SNAKE_CASE = getLogger(__name__)
# The generation function below logs through ``logger``, which was never
# bound in the original.
logger = SCREAMING_SNAKE_CASE
# Default inference device; referenced as ``DEFAULT_DEVICE`` in the signature
# below but originally bound only to a throwaway name.
DEFAULT_DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
SCREAMING_SNAKE_CASE = DEFAULT_DEVICE  # preserve the original final binding
def lowercase_ ( examples : List[str] , out_file : str , model_name : str , batch_size : int = 8 , device : str = DEFAULT_DEVICE , fpaa : bool = False , task : str = "summarization" , prefix : str = None , **generate_kwargs , ) -> Dict:
    """Run seq2seq generation over ``examples`` and stream outputs to ``out_file``.

    Loads ``model_name`` onto ``device`` (optionally in fp16), generates in
    ``batch_size`` chunks, writes one hypothesis per line, and returns a dict
    with ``n_obs``, total ``runtime`` (whole seconds) and ``seconds_per_sample``.

    NOTE(review): the original declared every parameter as ``__A`` (a
    duplicate-argument SyntaxError); real names were restored here.
    """
    fout = Path(out_file ).open('''w''' , encoding='''utf-8''' )
    model_name = str(model_name )
    model = AutoModelForSeqaSeqLM.from_pretrained(model_name ).to(device )
    if fpaa:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    logger.info(F'Inferred tokenizer type: {tokenizer.__class__}' )  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model , task )
    if prefix is None:
        prefix = getattr(model.config , '''prefix''' , '''''' ) or ''''''
    for examples_chunk in tqdm(list(chunks(examples , batch_size ) ) ):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk , return_tensors='''pt''' , truncation=True , padding='''longest''' ).to(device )
        summaries = model.generate(
            input_ids=batch.input_ids , attention_mask=batch.attention_mask , **generate_kwargs , )
        dec = tokenizer.batch_decode(summaries , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        for hypothesis in dec:
            fout.write(hypothesis + '''\n''' )
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time )  # seconds
    n_obs = len(examples )
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def lowercase_ ( ) -> Optional[Any]:
    """Return the current local time as a ``YYYY-MM-DD HH:MM:SS`` string."""
    # isoformat with a space separator and second precision produces exactly
    # the same text as strftime('%Y-%m-%d %H:%M:%S').
    return datetime.datetime.now().isoformat(sep=' ', timespec='seconds')
def lowercase_ ( __A : Tuple=True ) -> List[Any]:
    """CLI driver: parse args, run generation, score the output and dump metrics.

    NOTE(review): machine-mangled — ``parser``, ``args``, ``parsed_args``,
    ``examples``, ``score_fn`` and ``scores`` are read below, but the
    corresponding results were assigned to throwaway ``lowercase`` locals, so
    this function raises NameError as written. ``__A`` is presumably the
    ``verbose`` flag, yet it is also passed where ``str``/``True``/``False``
    were meant (``type=__A``, ``required=__A``). Confirm against the upstream
    ``run_eval.py`` before fixing.
    """
    lowercase : Optional[int] =argparse.ArgumentParser()
    parser.add_argument('''model_name''' , type=__A , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
    parser.add_argument('''input_path''' , type=__A , help='''like cnn_dm/test.source''' )
    parser.add_argument('''save_path''' , type=__A , help='''where to save summaries''' )
    parser.add_argument('''--reference_path''' , type=__A , required=__A , help='''like cnn_dm/test.target''' )
    parser.add_argument('''--score_path''' , type=__A , required=__A , default='''metrics.json''' , help='''where to save metrics''' )
    parser.add_argument('''--device''' , type=__A , required=__A , default=__A , help='''cuda, cuda:1, cpu etc.''' )
    parser.add_argument(
        '''--prefix''' , type=__A , required=__A , default=__A , help='''will be added to the begininng of src examples''' )
    parser.add_argument('''--task''' , type=__A , default='''summarization''' , help='''used for task_specific_params + metrics''' )
    parser.add_argument('''--bs''' , type=__A , default=8 , required=__A , help='''batch size''' )
    parser.add_argument(
        '''--n_obs''' , type=__A , default=-1 , required=__A , help='''How many observations. Defaults to all.''' )
    parser.add_argument('''--fp16''' , action='''store_true''' )
    parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' )
    parser.add_argument(
        '''--info''' , nargs='''?''' , type=__A , const=datetime_now() , help=(
            '''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
            ''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
        ) , )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    lowercase : Dict =parser.parse_known_args()
    lowercase : Any =parse_numeric_n_bool_cl_kwargs(__A )
    if parsed_args and verbose:
        print(F'parsed the following generate kwargs: {parsed_args}' )
    # T5-family models expect a leading space on each source line.
    lowercase : Tuple =[''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
    if args.n_obs > 0:
        lowercase : List[str] =examples[: args.n_obs]
    Path(args.save_path ).parent.mkdir(exist_ok=__A )
    if args.reference_path is None and Path(args.score_path ).exists():
        warnings.warn(F'score_path {args.score_path} will be overwritten unless you type ctrl-c.' )
    if args.device == "cpu" and args.fpaa:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
    lowercase : Union[str, Any] =generate_summaries_or_translations(
        __A , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **__A , )
    if args.reference_path is None:
        return {}
    # Compute scores
    lowercase : int =calculate_bleu if '''translation''' in args.task else calculate_rouge
    lowercase : List[str] =[x.rstrip() for x in open(args.save_path ).readlines()]
    lowercase : List[Any] =[x.rstrip() for x in open(args.reference_path ).readlines()][: len(__A )]
    lowercase : dict =score_fn(__A , __A )
    scores.update(__A )
    if args.dump_args:
        scores.update(__A )
    if args.info:
        lowercase : Tuple =args.info
    if verbose:
        print(__A )
    if args.score_path is not None:
        json.dump(__A , open(args.score_path , '''w''' ) )
    return scores
if __name__ == "__main__":
    # Usage for MT:
    # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
    # NOTE(review): ``run_generate`` is not defined in this module (the driver
    # above is named ``lowercase_``), so this entry point raises NameError.
    run_generate(verbose=True)
| 704 |
'''simple docstring'''
# Module author tag.
SCREAMING_SNAKE_CASE = 'Alexander Joslin'

import operator as op

# Project-local stack implementation (push/pop/peek) for the evaluator below.
from .stack import Stack
def lowercase_ ( __A : str ) -> int:
    """Evaluate a fully-parenthesised infix expression of single-digit
    operands with Dijkstra's two-stack algorithm.

    Every operator application must be wrapped in parentheses, e.g.
    ``(5 + ((4 * 2) * (2 + 3)))``. Returns the numeric result (``/`` yields
    a float via true division).
    """
    operators = {'''*''': op.mul, '''/''': op.truediv, '''+''': op.add, '''-''': op.sub}
    # Plain lists serve as the two stacks (append = push, pop = pop, [-1] = peek).
    operand_stack: list = []
    operator_stack: list = []
    for ch in __A:
        if ch.isdigit():
            # RULE 1: operands go onto the operand stack.
            operand_stack.append(int(ch ) )
        elif ch in operators:
            # RULE 2: operators go onto the operator stack.
            operator_stack.append(ch )
        elif ch == ")":
            # RULE 4: on ')', apply the top operator to the top two operands.
            opr = operator_stack.pop()
            num1 = operand_stack.pop()  # right-hand operand (pushed last)
            num2 = operand_stack.pop()  # left-hand operand
            # was: ``operators[opr](__A, __A)`` — applied the operator to the
            # input string instead of the popped operands.
            operand_stack.append(operators[opr](num2 , num1 ) )
    # RULE 5: the single remaining operand is the result.
    return operand_stack[-1]
if __name__ == "__main__":
    SCREAMING_SNAKE_CASE = '(5 + ((4 * 2) * (2 + 3)))'
    # answer = 45
    # The expression lives in SCREAMING_SNAKE_CASE and the evaluator above is
    # ``lowercase_``; the original referenced the undefined names ``equation``
    # and ``dijkstras_two_stack_algorithm``.
    print(f"""{SCREAMING_SNAKE_CASE} = {lowercase_(SCREAMING_SNAKE_CASE)}""")
| 8 | 0 |
'''simple docstring'''
# 3D point / vector type aliases. The original bound both to the same
# throwaway name, leaving the ``Pointad``/``Vectorad`` annotations used by
# the functions below undefined.
Pointad = tuple[float, float, float]
Vectorad = tuple[float, float, float]
SCREAMING_SNAKE_CASE = Vectorad  # preserve the original module-level binding
def lowercase_ ( __A : tuple , __B : tuple ) -> tuple:
    """Return the 3D vector pointing from point ``__A`` to point ``__B``.

    The original signature declared the same name for both points (a
    duplicate-argument SyntaxError) and subtracted a point from itself,
    which would always yield ``(0, 0, 0)``.
    """
    x = __B[0] - __A[0]
    y = __B[1] - __A[1]
    z = __B[2] - __A[2]
    return (x, y, z)
def lowercase_ ( __A : tuple , __B : tuple ) -> tuple:
    """Return the cross product ``__A x __B`` of two 3D vectors.

    The original signature reused one parameter name for both vectors
    (a duplicate-argument SyntaxError).
    """
    x = __A[1] * __B[2] - __A[2] * __B[1]  # *i
    y = (__A[0] * __B[2] - __A[2] * __B[0]) * -1  # *j
    z = __A[0] * __B[1] - __A[1] * __B[0]  # *k
    return (x, y, z)
def lowercase_ ( __A : tuple , __B : int ) -> bool:
    """True if every component of vector ``__A`` rounds to zero at ``__B``
    decimal places.

    The original reused one parameter name (SyntaxError), rounded the vector
    by itself and iterated the undefined name ``vector``.
    """
    return tuple(round(c , __B ) for c in __A ) == (0, 0, 0)
def lowercase_ ( __A : tuple , __B : tuple , __C : tuple , __D : int = 1_0 ) -> bool:
    """True if the three 3D points are collinear, i.e. the cross product of
    the two vectors spanned from ``__A`` is the zero vector up to ``__D``
    decimal places.

    The vector helpers defined above are all named ``lowercase_`` and shadow
    one another, so their math is inlined here instead of calling them.
    """
    ab = (__B[0] - __A[0], __B[1] - __A[1], __B[2] - __A[2])
    ac = (__C[0] - __A[0], __C[1] - __A[1], __C[2] - __A[2])
    cross = (
        ab[1] * ac[2] - ab[2] * ac[1],
        (ab[0] * ac[2] - ab[2] * ac[0]) * -1,
        ab[0] * ac[1] - ab[1] * ac[0],
    )
    return tuple(round(c , __D ) for c in cross ) == (0, 0, 0)
| 705 |
'''simple docstring'''
import re
def lowercase_ ( __A : str ) -> bool:
    """Validate an Indian mobile number.

    Accepts an optional ``+91`` (with optional ``-``/space), ``0`` or ``91``
    prefix followed by a 10-digit number starting with 7, 8 or 9.
    """
    pat = re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''' )
    # was: ``re.search(__A, __A)`` searched the phone against itself as a
    # pattern, and compared against the undefined name ``phone``.
    return pat.search(__A ) is not None
if __name__ == "__main__":
    # The validator above is named ``lowercase_``; the original called the
    # undefined ``indian_phone_validator``.
    print(lowercase_('+918827897895'))
| 8 | 0 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def lowercase_ ( __A : str = "AAPL" ) -> str:
    """Scrape the current price of stock symbol ``__A`` from Yahoo Finance India.

    Performs a live network request; no caching or error handling. Raises if
    the page layout changes (``find`` returning ``None``).
    """
    url = F'https://in.finance.yahoo.com/quote/{__A}?s={__A}'
    soup = BeautifulSoup(requests.get(url ).text , '''html.parser''' )
    # CSS class of the quote container on the page; the original bound it to
    # a throwaway name and then passed the undefined ``class_`` to find().
    class_ = '''My(6px) Pos(r) smartphone_Mt(6px)'''
    return soup.find('''div''' , class_=class_ ).find('''span''' ).text
if __name__ == "__main__":
    for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
        # The scraper above is named ``lowercase_``; the original called the
        # undefined ``stock_price``.
        print(f"""Current {symbol:<4} stock price is {lowercase_(symbol):>8}""")
| 706 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCAmelCase_ :
    """Helper that builds tiny RoFormer configs/inputs and checks each TF head.

    NOTE(review): machine-mangled. Every ``create_and_check_*`` method is
    named ``A__`` (later definitions shadow earlier ones), and their
    signatures repeat the parameter name ``UpperCAmelCase`` — a
    duplicate-argument SyntaxError, so this module does not even parse as
    written. The ``__init__`` parameters are likewise all ignored in favour
    of hard-coded values. Confirm against the upstream RoFormer TF test
    before fixing.
    """

    def __init__( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any]=13 , UpperCAmelCase : int=7 , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=99 , UpperCAmelCase : List[Any]=32 , UpperCAmelCase : str=2 , UpperCAmelCase : str=4 , UpperCAmelCase : List[Any]=37 , UpperCAmelCase : str="gelu" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Dict=512 , UpperCAmelCase : List[Any]=16 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : Any=0.0_2 , UpperCAmelCase : List[str]=3 , UpperCAmelCase : Dict=4 , UpperCAmelCase : Optional[int]=None , ) -> Optional[Any]:
        '''Store hard-coded hyper-parameters for the tiny test model (the keyword defaults above are ignored).'''
        lowercase : Optional[Any] =parent
        lowercase : Tuple =13
        lowercase : Any =7
        lowercase : Union[str, Any] =True
        lowercase : Any =True
        lowercase : Optional[int] =True
        lowercase : List[str] =True
        lowercase : Tuple =99
        lowercase : str =32
        lowercase : Union[str, Any] =2
        lowercase : Dict =4
        lowercase : Union[str, Any] =37
        lowercase : Union[str, Any] ='''gelu'''
        lowercase : Any =0.1
        lowercase : Dict =0.1
        lowercase : Dict =512
        lowercase : List[str] =16
        lowercase : Dict =2
        lowercase : int =0.0_2
        lowercase : List[Any] =3
        lowercase : List[str] =4
        lowercase : Optional[Any] =None

    def A__ ( self : Union[str, Any] ) -> int:
        '''Build a tiny RoFormerConfig plus random ids/masks/labels for the tests below.'''
        lowercase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase : str =None
        if self.use_input_mask:
            lowercase : int =random_attention_mask([self.batch_size, self.seq_length] )
        lowercase : Any =None
        if self.use_token_type_ids:
            lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        lowercase : List[Any] =None
        lowercase : List[str] =None
        lowercase : List[str] =None
        if self.use_labels:
            lowercase : Optional[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowercase : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowercase : Any =ids_tensor([self.batch_size] , self.num_choices )
        lowercase : List[Any] =RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def A__ ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ) -> Optional[Any]:
        '''Base model: check last_hidden_state shape for dict and list inputs.'''
        lowercase : List[Any] =TFRoFormerModel(config=UpperCAmelCase )
        lowercase : Optional[int] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
        lowercase : Tuple =[input_ids, input_mask]
        lowercase : str =model(UpperCAmelCase )
        lowercase : Dict =model(UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def A__ ( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ) -> Any:
        '''Causal-LM head: check logits shape.'''
        lowercase : Dict =True
        lowercase : List[Any] =TFRoFormerForCausalLM(config=UpperCAmelCase )
        lowercase : Union[str, Any] ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        lowercase : Optional[Any] =model(UpperCAmelCase )['''logits''']
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )

    def A__ ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple ) -> Dict:
        '''Masked-LM head: check logits shape.'''
        lowercase : List[Any] =TFRoFormerForMaskedLM(config=UpperCAmelCase )
        lowercase : List[str] ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        lowercase : Dict =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def A__ ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : int ) -> Optional[Any]:
        '''Sequence-classification head: check logits shape.'''
        lowercase : Optional[Any] =self.num_labels
        lowercase : Optional[int] =TFRoFormerForSequenceClassification(config=UpperCAmelCase )
        lowercase : Optional[int] ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        lowercase : Optional[Any] =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def A__ ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple ) -> Union[str, Any]:
        '''Multiple-choice head: tile inputs across choices and check logits shape.'''
        lowercase : int =self.num_choices
        lowercase : Tuple =TFRoFormerForMultipleChoice(config=UpperCAmelCase )
        lowercase : Union[str, Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        lowercase : Tuple =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        lowercase : List[Any] ={
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
            '''token_type_ids''': multiple_choice_token_type_ids,
        }
        lowercase : Dict =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def A__ ( self : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> Optional[int]:
        '''Token-classification head: check logits shape.'''
        lowercase : List[Any] =self.num_labels
        lowercase : Union[str, Any] =TFRoFormerForTokenClassification(config=UpperCAmelCase )
        lowercase : Tuple ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        lowercase : List[str] =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def A__ ( self : int , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ) -> Any:
        '''Question-answering head: check start/end logits shapes.'''
        lowercase : Tuple =TFRoFormerForQuestionAnswering(config=UpperCAmelCase )
        lowercase : List[str] ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
            '''token_type_ids''': token_type_ids,
        }
        lowercase : List[str] =model(UpperCAmelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def A__ ( self : List[Any] ) -> Dict:
        '''Repackage prepare_config_and_inputs() into (config, inputs_dict) for the common tests.'''
        lowercase : Optional[Any] =self.prepare_config_and_inputs()
        (
            (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) , (
                lowercase
            ) ,
        ) : Optional[int] =config_and_inputs
        lowercase : str ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
    """Common test suite wiring the TFRoFormer model heads to the shared tester.

    NOTE(review): the two ``__A`` base classes are undefined in this file (the
    original mixin names were lost to obfuscation) and are duplicated, which
    Python rejects at class creation; left untouched pending the real names.
    All test methods share the obfuscated name ``A__``, so later definitions
    shadow earlier ones.
    """

    # NOTE(review): these four class attributes share a single obfuscated
    # name, so only the last assignment survives (originals were
    # all_model_classes, pipeline_model_mapping, test_head_masking, test_onnx).
    UpperCamelCase_ = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    UpperCamelCase_ = (
        {
            '''feature-extraction''': TFRoFormerModel,
            '''fill-mask''': TFRoFormerForMaskedLM,
            '''question-answering''': TFRoFormerForQuestionAnswering,
            '''text-classification''': TFRoFormerForSequenceClassification,
            '''text-generation''': TFRoFormerForCausalLM,
            '''token-classification''': TFRoFormerForTokenClassification,
            '''zero-shot''': TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    UpperCamelCase_ = False
    UpperCamelCase_ = False

    def A__(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        """Return True for pipeline test cases that must be skipped.

        was: seven duplicate ``UpperCAmelCase`` parameters (a SyntaxError) and
        an undefined ``pipeline_test_casse_name``; names restored from the
        body's reference and the upstream signature (including its typo).
        """
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False

    def A__(self):
        """Instantiate the shared model tester and config tester.

        was: both testers were bound to throwaway locals although the methods
        below read ``self.model_tester`` / ``self.config_tester``.
        NOTE(review): ``config_class`` assumed to be ``TFRoFormerConfig`` —
        the obfuscated original passed an undefined name; confirm.
        """
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TFRoFormerConfig, hidden_size=37)

    def A__(self):
        """Run the common configuration sanity checks."""
        self.config_tester.run_common_tests()

    def A__(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def A__(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def A__(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def A__(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def A__(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def A__(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def A__(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def A__(self):
        """Smoke-test loading the pretrained RoFormer checkpoint."""
        model = TFRoFormerModel.from_pretrained('''junnyu/roformer_chinese_base''')
        self.assertIsNotNone(model)
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
    """Integration test: TFRoFormer masked-LM logits on a fixed 6-token input."""

    @slow
    def A__(self):
        model = TFRoFormerForMaskedLM.from_pretrained('''junnyu/roformer_chinese_base''')
        # was: the tensor below was bound to a throwaway local and an
        # undefined name was passed to the model (NameError at runtime).
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        # TODO Replace vocab size
        vocab_size = 50000
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)
        print(output[:, :3, :3])
        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
    """Checks TFRoFormer sinusoidal positional embeddings against references."""

    # Float-comparison tolerance; the methods below read it via
    # ``self.UpperCamelCase_`` (the original attribute name — ``tolerance`` —
    # was lost to obfuscation, while the bodies still referenced it, which
    # raised AttributeError).
    UpperCamelCase_ = 1e-4

    def A__(self):
        """Embedding output for a (1, 2)-shaped input matches the reference."""
        input_ids = tf.constant([[4, 10]])
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emb1 = emba(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )
        # was: both arguments were the same undefined name.
        tf.debugging.assert_near(emb1, desired_weights, atol=self.UpperCamelCase_)

    def A__(self):
        """Embedding weights for a large table match the reference slice.

        NOTE(review): shadows the previous ``A__`` — the originals had
        distinct test names.
        """
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emba([2, 16, 512])
        weights = emba.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.UpperCamelCase_)
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
    """Checks TFRoFormerSelfAttention.apply_rotary_position_embeddings."""

    # Float-comparison tolerance; the method below reads it via
    # ``self.UpperCamelCase_`` (original attribute name lost to obfuscation).
    UpperCamelCase_ = 1e-4

    def A__(self):
        # Deterministic query/key tensors; ``tf.floataa`` in the obfuscated
        # original is not a TensorFlow attribute — restored to tf.float32.
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]
        # was: results unpacked into throwaway locals while the assertions
        # below read ``query_layer``/``key_layer``.
        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )
        expected_query = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        expected_key = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8], expected_query, atol=self.UpperCamelCase_)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], expected_key, atol=self.UpperCamelCase_)
| 8 | 0 |
'''simple docstring'''
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE = TypeVar('T')
class UpperCAmelCase_ ( Generic[T] ):
    """Graph represented as an adjacency list (dict of vertex -> neighbours).

    Directed by default; pass ``directed=False`` for an undirected graph.

    NOTE(review): the obfuscated original declared duplicate parameter names
    (a SyntaxError) and bound every ``self.adj_list[...] = ...`` statement to
    throwaway locals; both restored here from the body's own references.
    """

    def __init__(self, directed: bool = True) -> None:
        """Create an empty graph.

        :param directed: whether edges added later are one-way (True) or
            symmetric (False).
        """
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def A__(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
        """Add an edge from ``source_vertex`` to ``destination_vertex``.

        Vertices are created on first use. For undirected graphs the reverse
        edge is added as well. Returns ``self`` so calls can be chained.
        """
        if not self.directed:  # For undirected graphs
            # If both endpoints already exist, record each in the other's
            # neighbour list.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
            # Only the source exists: append the destination to it, then
            # create the destination with the source as its first neighbour.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
            # Only the destination exists: mirror of the case above.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
            # Neither exists: create both, each pointing at the other.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
        else:  # For directed graphs
            # Both endpoints exist: just append the destination.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
            # Only the source exists: append the destination and create it
            # with no outgoing edges.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
            # Only the destination exists: create the source pointing at it.
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
            # Neither exists: create the source with one edge and the
            # destination with none.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []
        return self

    def __repr__(self) -> str:
        """Pretty-printed adjacency list."""
        return pformat(self.adj_list)
| 707 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( ProcessorMixin ):
    """Processor combining a LayoutLMv2 image processor and a LayoutXLM tokenizer.

    NOTE(review): the base class was the undefined name ``__A``; restored to
    ``ProcessorMixin`` per the file's imports. The three class attributes
    below share one obfuscated name, so only the last assignment survives
    (originals: ``attributes``, ``image_processor_class``,
    ``tokenizer_class``); left unchanged to avoid an interface change. The
    trailing methods/properties likewise all share the name ``A__``.
    """

    UpperCamelCase_ = ['''image_processor''', '''tokenizer''']
    UpperCamelCase_ = '''LayoutLMv2ImageProcessor'''
    UpperCamelCase_ = ('''LayoutXLMTokenizer''', '''LayoutXLMTokenizerFast''')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        """Wrap the given image processor and tokenizer.

        Accepts the deprecated ``feature_extractor`` keyword as a fallback for
        ``image_processor``. Raises ValueError when either component is missing.
        """
        # was: `feature_extractor` was only bound inside the deprecation
        # branch, so the fallback below raised NameError instead of the
        # intended ValueError when no image processor was supplied.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('''feature_extractor''')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Run the image processor then the tokenizer and merge their outputs.

        was: every parameter was named ``UpperCAmelCase`` (a SyntaxError);
        names restored from the body's own keyword references.
        """
        # verify input consistency with the OCR setting
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                '''You cannot provide bounding boxes '''
                '''if you initialized the image processor with apply_ocr set to True.''')

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''')

        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''')

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['''words''']

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['''words'''],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features['''boxes'''],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop('''pixel_values''')
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs['''overflow_to_sample_mapping'''])
        encoded_inputs['''image'''] = images
        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        """Repeat each image once per overflowed chunk of its sample.

        NOTE(review): this helper was obfuscated to ``A__`` and shadowed by
        the later ``A__`` definitions (making it unreachable), while
        ``__call__`` invokes ``self.get_overflowing_images``; the name is
        restored so that call resolves.
        """
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
                f' {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}')

        return images_with_overflow

    def A__(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def A__(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def A__(self):
        """Names of the model inputs produced by this processor."""
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def A__(self):
        """Deprecated alias for ``image_processor_class``."""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def A__(self):
        """Deprecated alias for ``image_processor``."""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''',
            FutureWarning,
        )
        return self.image_processor
| 8 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
SCREAMING_SNAKE_CASE = '2020.9.26'
SCREAMING_SNAKE_CASE = 'xcodz-dot, cclaus, dhruvmanila'
def lowercase_(x: float, y: float, z: float, distance: float, scale: float) -> tuple[float, float]:
    """Project the 3-D point (x, y, z) onto 2-D with a simple perspective projection.

    ``distance`` is the viewer distance and ``scale`` a uniform zoom factor.

    Raises:
        TypeError: if any argument is not a float or int.

    NOTE(review): the obfuscated original declared five duplicate ``__A``
    parameters (a SyntaxError) and type-checked the wrong value; parameter
    names restored from the body's references.
    """
    if not all(isinstance(val, (float, int)) for val in locals().values()):
        msg = f'Input values must either be float or int: {list(locals().values())}'
        raise TypeError(msg)
    projected_x = ((x * distance) / (z + distance)) * scale
    projected_y = ((y * distance) / (z + distance)) * scale
    return projected_x, projected_y
def lowercase_(x: float, y: float, z: float, axis: str, angle: float) -> tuple[float, float, float]:
    """Rotate the point (x, y, z) about ``axis`` ('x', 'y' or 'z') by ``angle``.

    Raises:
        TypeError: if ``axis`` is not a str, or a coordinate/angle is not numeric.
        ValueError: if ``axis`` is not one of 'x', 'y', 'z'.

    NOTE(review): the obfuscated original declared five duplicate ``__A``
    parameters (a SyntaxError); names restored from the body. The angle
    conversion divides by 450 (not 360) — kept as-is from the original
    algorithm; confirm before "fixing".
    """
    if not isinstance(axis, str):
        raise TypeError('''Axis must be a str''')
    input_variables = locals()
    del input_variables["axis"]
    if not all(isinstance(val, (float, int)) for val in input_variables.values()):
        msg = (
            '''Input values except axis must either be float or int: '''
            f'{list(input_variables.values())}'
        )
        raise TypeError(msg)
    angle = (angle % 360) / 450 * 180 / math.pi
    if axis == "z":
        new_x = x * math.cos(angle) - y * math.sin(angle)
        new_y = y * math.cos(angle) + x * math.sin(angle)
        new_z = z
    elif axis == "x":
        new_y = y * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + y * math.sin(angle)
        new_x = x
    elif axis == "y":
        new_x = x * math.cos(angle) - z * math.sin(angle)
        new_z = z * math.cos(angle) + x * math.sin(angle)
        new_y = y
    else:
        raise ValueError('''not a valid axis, choose one of \'x\', \'y\', \'z\'''')
    return new_x, new_y, new_z
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # NOTE(review): `convert_to_ad` and `rotate` are not defined in this file —
    # both helpers above were renamed to `lowercase_` by the obfuscation, so
    # these calls raise NameError; confirm the intended names.
    print(f"""{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }""")
    print(f"""{rotate(1.0, 2.0, 3.0, 'y', 90.0) = }""")
| 708 |
'''simple docstring'''
def lowercase_(__A: int = 600851475143) -> int:
    """Return the largest prime factor of ``__A`` (Project Euler problem 3).

    Raises:
        TypeError: if ``__A`` cannot be converted to an int.
        ValueError: if the converted value is not positive.
    """
    try:
        remaining = int(__A)
    except (TypeError, ValueError):
        raise TypeError('''Parameter n must be int or castable to int.''')
    if remaining <= 0:
        raise ValueError('''Parameter n must be greater than or equal to one.''')

    largest_factor = 1
    candidate = 2
    # Strip each factor in turn; what survives (or the last factor removed)
    # is the largest prime factor.
    while candidate * candidate <= remaining:
        while remaining % candidate == 0:
            largest_factor = candidate
            remaining //= candidate
        candidate += 1
    return int(remaining) if remaining > 1 else int(largest_factor)
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined in this file — the factorizer
    # above was renamed to `lowercase_` by the obfuscation, so this call
    # raises NameError; confirm the intended name.
    print(f"""{solution() = }""")
| 8 | 0 |
"""Lazy import structure for the M2M100 model."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


# Public import map consumed by `_LazyModule` below; the symbol names must
# match what the TYPE_CHECKING branch imports.
SCREAMING_SNAKE_CASE = {
    'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
    'tokenization_m2m_100': ['M2M100Tokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # was: this branch rebound the whole import map to a bare list,
    # clobbering the config/tokenizer entries; register the models under
    # their module key instead.
    SCREAMING_SNAKE_CASE['modeling_m2m_100'] = [
        'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
        'M2M100ForConditionalGeneration',
        'M2M100Model',
        'M2M100PreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers import the real symbols eagerly; names restored to
    # match the strings declared in the import map above (was: inconsistent
    # `MaMaaa*` names from a `.configuration_mam_aaa` module).
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )

else:
    import sys

    # was: the lazy proxy was bound to a throwaway variable and the undefined
    # name `_import_structure` was passed in; the proxy must replace this
    # module in sys.modules for lazy loading to take effect.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], SCREAMING_SNAKE_CASE, module_spec=__spec__)
| 709 |
'''simple docstring'''
from __future__ import annotations
import math
def lowercase_(u: float, n: int) -> float:
    """Return ``u * (u-1) * (u-2) * ... * (u-(n-1))``.

    This is the falling-factorial-style coefficient used by Newton's
    forward-difference interpolation.

    NOTE(review): the obfuscated original declared two parameters both named
    ``__A`` (a SyntaxError) while the body read ``u``; names restored.
    """
    temp = u
    for i in range(1, n):
        temp = temp * (u - i)
    return temp
def lowercase_() -> None:
    """Interactively run Newton's forward-difference interpolation.

    Reads the table size, the x values, and the corresponding y values from
    stdin, then prints the interpolated value at the requested point.

    NOTE(review): the obfuscated original left `n`, `x` and every table
    assignment bound to throwaway locals (NameError at runtime) and called an
    undefined `ucal`; the helper is computed inline here because the module's
    helper function shares this function's obfuscated name.
    """
    n = int(input('''enter the numbers of values: '''))
    y: list[list[float]] = []
    for _ in range(n):
        y.append([])
    # Pre-fill an n x n forward-difference table with zeros.
    for i in range(n):
        for j in range(n):
            y[i].append(0)
    print('''enter the values of parameters in a list: ''')
    x = list(map(int, input().split()))
    print('''enter the values of corresponding parameters: ''')
    for i in range(n):
        y[i][0] = float(input())
    value = int(input('''enter the value to interpolate: '''))
    u = (value - x[0]) / (x[1] - x[0])
    # for calculating forward difference table
    for i in range(1, n):
        for j in range(n - i):
            y[j][i] = y[j + 1][i - 1] - y[j][i - 1]
    summ = y[0][0]
    for i in range(1, n):
        # term = u * (u-1) * ... * (u-(i-1)), computed inline (see NOTE above)
        term = u
        for k in range(1, i):
            term = term * (u - k)
        summ += (term * y[0][i]) / math.factorial(i)
    print(f'the value at {value} is {summ}')
if __name__ == "__main__":
    # NOTE(review): `main` is not defined in this file — the interpolation
    # driver above was renamed to `lowercase_` by the obfuscation, so this
    # call raises NameError; confirm the intended entry point.
    main()
| 8 | 0 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class UpperCAmelCase_ ( unittest.TestCase ):
    """Launches the accelerate test script across 8 TPU cores via xla_spawn.

    NOTE(review): both methods share the obfuscated name ``A__``, so the
    second definition shadows the first (the originals were a ``setUp`` and a
    test method); names left unchanged to avoid an interface change.
    """

    def A__(self) -> None:
        """Locate the accelerate test script and this test file's directory."""
        mod_file = inspect.getfile(accelerate.test_utils)
        # was: both paths were bound to throwaway locals although the test
        # below reads them from `self`; store them as attributes.
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ['''scripts''', '''test_script.py'''])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def A__(self) -> None:
        """Spawn the test script on 8 TPU cores and wait for it to finish."""
        distributed_args = f'\n    {self.test_dir}/xla_spawn.py\n    --num_cores 8\n    {self.test_file_path}\n    '.split()
        cmd = [sys.executable] + distributed_args
        # was: an undefined name was passed instead of the command list.
        execute_subprocess_async(cmd, env=os.environ.copy())
| 710 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class UpperCAmelCase_ ( unittest.TestCase ):
    """Tests for AutoImageProcessor resolution, registration and remote code.

    NOTE(review): automated obfuscation in this file replaced many variable
    reads with the undefined name ``UpperCAmelCase`` (e.g. ``Path(UpperCAmelCase )``
    inside ``with tempfile.TemporaryDirectory() as tmpdirname:`` blocks, and
    ``assertIsInstance(UpperCAmelCase , UpperCAmelCase )`` where the originals
    compared a processor instance to ``CLIPImageProcessor``), bound results to
    throwaway ``lowercase`` locals, and collapsed all test names to ``A__``
    (later definitions shadow earlier ones). Code left byte-identical; only
    comments/docstrings were changed — reconstruct names from the upstream
    transformers test before relying on these tests.
    """
    def A__ ( self : int ) -> Any:
        '''Per-test setup (obfuscated body assigns 0 to a throwaway local).'''
        lowercase : Union[str, Any] =0
    def A__ ( self : str ) -> Union[str, Any]:
        '''Resolve an image processor directly from a hub identifier.'''
        lowercase : Tuple =AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
        self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
    def A__ ( self : Any ) -> int:
        '''Resolve from a local dir whose preprocessor_config names the class.'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
            lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
            lowercase : str =AutoImageProcessor.from_pretrained(UpperCAmelCase )
            self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
    def A__ ( self : Any ) -> Union[str, Any]:
        '''Resolve when only the legacy feature_extractor_type key is present.'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase : int =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
            lowercase : int =Path(UpperCAmelCase ) / '''config.json'''
            json.dump(
                {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
            lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
            self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
    def A__ ( self : Optional[Any] ) -> Any:
        '''Resolve via config.json alone after stripping image_processor_type.'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase : str =CLIPConfig()
            # Create a dummy config file with image_proceesor_type
            lowercase : Optional[int] =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
            lowercase : Optional[Any] =Path(UpperCAmelCase ) / '''config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
            json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
            # remove image_processor_type to make sure config.json alone is enough to load image processor locally
            lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase ).to_dict()
            config_dict.pop('''image_processor_type''' )
            lowercase : str =CLIPImageProcessor(**UpperCAmelCase )
            # save in new folder
            model_config.save_pretrained(UpperCAmelCase )
            config.save_pretrained(UpperCAmelCase )
            lowercase : Optional[int] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
            # make sure private variable is not incorrectly saved
            lowercase : int =json.loads(config.to_json_string() )
            self.assertTrue('''_processor_class''' not in dict_as_saved )
            self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
    def A__ ( self : str ) -> List[str]:
        '''Resolve from a directory containing only preprocessor_config.json.'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase : Dict =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
            json.dump(
                {'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
            lowercase : Optional[Any] =AutoImageProcessor.from_pretrained(UpperCAmelCase )
            self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
    def A__ ( self : int ) -> List[str]:
        '''An invalid model identifier raises with a clear message.'''
        with self.assertRaisesRegex(
            UpperCAmelCase , '''clip-base is not a local folder and is not a valid model identifier''' ):
            lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained('''clip-base''' )
    def A__ ( self : List[Any] ) -> List[str]:
        '''An invalid revision raises with a clear message.'''
        with self.assertRaisesRegex(
            UpperCAmelCase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
            lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , revision='''aaaaaa''' )
    def A__ ( self : Tuple ) -> Optional[int]:
        '''A repo with no preprocessor_config.json raises with a clear message.'''
        with self.assertRaisesRegex(
            UpperCAmelCase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
            lowercase : Optional[int] =AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
    def A__ ( self : List[str] ) -> str:
        '''Remote code requires trust_remote_code=True and round-trips via save/load.'''
        with self.assertRaises(UpperCAmelCase ):
            lowercase : Dict =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(UpperCAmelCase ):
            lowercase : List[str] =AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
        lowercase : Union[str, Any] =AutoImageProcessor.from_pretrained(
            '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
        self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
        # Test image processor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(UpperCAmelCase )
            lowercase : Any =AutoImageProcessor.from_pretrained(UpperCAmelCase , trust_remote_code=UpperCAmelCase )
        self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
    def A__ ( self : Union[str, Any] ) -> Dict:
        '''Custom configs/processors can be registered and resolved; cleanup in finally.'''
        try:
            AutoConfig.register('''custom''' , UpperCAmelCase )
            AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(UpperCAmelCase ):
                AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
            with tempfile.TemporaryDirectory() as tmpdirname:
                lowercase : Any =Path(UpperCAmelCase ) / '''preprocessor_config.json'''
                lowercase : str =Path(UpperCAmelCase ) / '''config.json'''
                json.dump(
                    {'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(UpperCAmelCase , '''w''' ) , )
                json.dump({'''model_type''': '''clip'''} , open(UpperCAmelCase , '''w''' ) )
                lowercase : Optional[int] =CustomImageProcessor.from_pretrained(UpperCAmelCase )
                # Now that the config is registered, it can be used as any other config with the auto-API
                with tempfile.TemporaryDirectory() as tmp_dir:
                    image_processor.save_pretrained(UpperCAmelCase )
                    lowercase : Dict =AutoImageProcessor.from_pretrained(UpperCAmelCase )
                    self.assertIsInstance(UpperCAmelCase , UpperCAmelCase )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
    def A__ ( self : Any ) -> Any:
        '''trust_remote_code selects between a registered local class and the hub class.'''
        class UpperCAmelCase_ ( __A ):
            """simple docstring"""
            UpperCamelCase_ = True
        try:
            AutoConfig.register('''custom''' , UpperCAmelCase )
            AutoImageProcessor.register(UpperCAmelCase , UpperCAmelCase )
            # If remote code is not set, the default is to use local
            lowercase : List[str] =AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote code is disabled, we load the local one.
            lowercase : Tuple =AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(image_processor.is_local )
            # If remote is enabled, we load from the Hub
            lowercase : Dict =AutoImageProcessor.from_pretrained(
                '''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=UpperCAmelCase )
            self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
            self.assertTrue(not hasattr(UpperCAmelCase , '''is_local''' ) )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
                del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 8 | 0 |
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    '''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , ROBERTA_START_DOCSTRING , )
class UpperCAmelCase_ ( DeeBertModel ):
    """DeeRoBERTa backbone: the DeeBERT model with RoBERTa embeddings.

    NOTE(review): the decorator argument and base class were the undefined
    name ``__A``; restored to ``ROBERTA_START_DOCSTRING`` / ``DeeBertModel``
    per this file's imports — confirm. The two class attributes below share
    one obfuscated name (originals: ``config_class``, ``base_model_prefix``),
    so only the last assignment survives; left unchanged.
    """

    UpperCamelCase_ = RobertaConfig
    UpperCamelCase_ = '''roberta'''

    def __init__(self, UpperCAmelCase) -> None:
        """Build the model from the given config and swap in RoBERTa embeddings."""
        super().__init__(UpperCAmelCase)
        # was: the embeddings were bound to a throwaway local; the module must
        # own them for the forward pass and weight init to see them.
        self.embeddings = RobertaEmbeddings(UpperCAmelCase)
        self.init_weights()
@add_start_docstrings(
    '''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. ''' , ROBERTA_START_DOCSTRING , )
class UpperCAmelCase_ ( BertPreTrainedModel ):
    """DeeRoBERTa with a sequence-classification head and per-layer highway exits."""

    config_class = RobertaConfig
    base_model_prefix = '''roberta'''

    def __init__(self, config):
        """Create the encoder, dropout and classification head from ``config``."""
        super().__init__(config)
        # All of these were bound to a discarded local in the obfuscated source,
        # leaving ``forward`` with undefined attributes; restored as instance state.
        self.num_labels = config.num_labels
        self.num_layers = config.num_hidden_layers
        self.roberta = DeeRobertaModel(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, self.config.num_labels)

    @add_start_docstrings_to_model_forward(ROBERTA_INPUTS_DOCSTRING)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_layer=-1,
        train_highway=False,
    ):
        """Run the encoder and classifier; optionally train the highway exits.

        Returns a tuple ``(loss?, logits, hidden_states?, attentions?, entropy?)``
        depending on ``labels`` and ``self.training``.
        """
        exit_layer = self.num_layers
        try:
            outputs = self.roberta(
                input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
                position_ids=position_ids,
                head_mask=head_mask,
                inputs_embeds=inputs_embeds,
            )
            pooled_output = outputs[1]
            pooled_output = self.dropout(pooled_output)
            logits = self.classifier(pooled_output)
            outputs = (logits,) + outputs[2:]  # add hidden states and attention if they are here
        except HighwayException as e:
            # An intermediate layer decided to exit early.
            outputs = e.message
            exit_layer = e.exit_layer
            logits = outputs[0]
        if not self.training:
            original_entropy = entropy(logits)
            highway_entropy = []
            highway_logits_all = []
        if labels is not None:
            if self.num_labels == 1:
                # We are doing regression
                loss_fct = MSELoss()
                loss = loss_fct(logits.view(-1), labels.view(-1))
            else:
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            # work with highway exits
            highway_losses = []
            for highway_exit in outputs[-1]:
                highway_logits = highway_exit[0]
                if not self.training:
                    highway_logits_all.append(highway_logits)
                    highway_entropy.append(highway_exit[2])
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    highway_loss = loss_fct(highway_logits.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    highway_loss = loss_fct(highway_logits.view(-1, self.num_labels), labels.view(-1))
                highway_losses.append(highway_loss)
            if train_highway:
                outputs = (sum(highway_losses[:-1]),) + outputs
                # exclude the final highway, of course
            else:
                outputs = (loss,) + outputs
        if not self.training:
            outputs = outputs + ((original_entropy, highway_entropy), exit_layer)
            if output_layer >= 0:
                outputs = (
                    (outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
                )  # use the highway of the last layer
        return outputs  # (loss), logits, (hidden_states), (attentions), entropy
| 711 |
'''simple docstring'''
from __future__ import annotations
# Coulomb's constant; the function body reads this name (the obfuscated source
# bound the value to an unrelated name, causing a NameError at call time).
COULOMBS_CONSTANT = 8.988E9  # units = N * m^s * C^-2


def lowercase_(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """Solve Coulomb's law for whichever of the four arguments is given as 0.

    Exactly one argument must be 0; the returned single-entry dict maps that
    argument's name to its solved value.

    Raises:
        ValueError: if not exactly one argument is 0, or if ``distance`` < 0.
    """
    # The obfuscated signature named all four parameters ``__A`` (a SyntaxError);
    # restored distinct names matching the returned dict keys.
    charge_product = abs(charge1 * charge2)
    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError('''One and only one argument must be 0''')
    if distance < 0:
        raise ValueError('''Distance cannot be negative''')
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError('''Exactly one argument must be 0''')
if __name__ == "__main__":
    # Run the module's embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
| 8 | 0 |
'''simple docstring'''
def lowercase_(string1: str, string2: str) -> int:
    """Return the Hamming distance between two equal-length strings.

    The Hamming distance is the number of positions at which the two strings
    differ.

    Raises:
        ValueError: if the strings have different lengths.
    """
    # Obfuscated source used duplicate parameter names (``__A , __A``) and the
    # loop targets ``chara, chara``, which made the comparison always false.
    if len(string1) != len(string2):
        raise ValueError('''String lengths must match!''')
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count
if __name__ == "__main__":
    # Run the module's embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
| 712 |
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

# The tokenizer class below reads these four module-level names; the obfuscated
# source rebound a single name four times, clobbering each value in turn.
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
    },
    'merges_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
    },
    'tokenizer_file': {
        'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'allenai/led-base-16384': 16_384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    """Return a byte -> printable-unicode-character mapping for byte-level BPE.

    Printable bytes map to themselves; the remaining bytes are mapped to
    characters starting at U+0100 so that no byte maps to whitespace/control
    characters the BPE code would choke on.
    """
    # Renamed from the shadowing obfuscated name ``lowercase_``: the tokenizer
    # class calls this function as ``bytes_to_unicode()``.
    bs = (
        list(range(ord('''!''') , ord('''~''') + 1)) + list(range(ord('''¡''') , ord('''¬''') + 1)) + list(range(ord('''®''') , ord('''ÿ''') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    """Return the set of adjacent symbol pairs in ``word`` (a sequence of symbols).

    Renamed from the obfuscated ``lowercase_``: the tokenizer's ``bpe`` method
    calls this helper as ``get_pairs``.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class UpperCAmelCase_ ( PreTrainedTokenizer ):
    """Byte-level BPE tokenizer for LED (same scheme as BART/GPT-2).

    NOTE(review): restored from the obfuscated source, in which every method was
    named ``A__`` (each definition clobbering the previous one) and local names
    such as ``first``/``second``/``kv``/``index`` were undefined. Method and
    attribute names follow the ``PreTrainedTokenizer`` contract; verify against
    the upstream LED tokenizer.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        """Load the vocabulary/merges files and configure special tokens."""
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, )
        with open(vocab_file, encoding='''utf-8''') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding='''utf-8''') as merges_handle:
            bpe_merges = merges_handle.read().split('''\n''')[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R'''\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+''')

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        """Size of the base vocabulary (without added tokens)."""
        return len(self.encoder)

    def get_vocab(self):
        """Return the full vocabulary (base + added tokens) as token -> id."""
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to ``token`` and return a space-joined string."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            # Lowest-rank (earliest-learned) merge wins each round.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('''inf''')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ''' '''.join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Split ``text`` with the GPT-2 regex, then BPE-encode each piece."""
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''''''.join(
                self.byte_encoder[b] for b in token.encode('''utf-8''') )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(''' '''))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        """Map a token string to its id (unk id when unknown)."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map an id back to its token string."""
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        """Join tokens and decode the byte-level escapes back to text."""
        text = ''''''.join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode('''utf-8''', errors=self.errors)
        return text

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Write ``vocab.json`` and ``merges.txt`` into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''])
        merge_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''])
        with open(vocab_file, '''w''', encoding='''utf-8''') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '''\n''')
        index = 0
        with open(merge_file, '''w''', encoding='''utf-8''') as writer:
            writer.write('''#version: 0.2\n''')
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        ''' Please check that the tokenizer is not corrupted!''')
                    index = token_index
                writer.write(''' '''.join(bpe_tokens) + '''\n''')
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """Add ``<s> ... </s>`` (and ``</s></s> ... </s>`` for pairs)."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        """Return a 0/1 mask marking special tokens in the built sequence."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """LED (like RoBERTa) does not use token types: always all zeros."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        """Optionally prepend a space so the first word gets a leading-space BPE."""
        add_prefix_space = kwargs.pop('''add_prefix_space''', self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = ''' ''' + text
        return (text, kwargs)

    def _pad(self, encoded_inputs, max_length=None, padding_strategy=PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of=None, return_attention_mask=None):
        """Pad as usual, then pad ``global_attention_mask`` with ``-1`` to match."""
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = '''attention_mask''' in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs['''global_attention_mask''']) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs['''global_attention_mask'''])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs['''global_attention_mask'''] = (
                        encoded_inputs['''global_attention_mask'''] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs['''global_attention_mask'''] = [-1] * difference + encoded_inputs[
                        '''global_attention_mask'''
                    ]
                else:
                    raise ValueError('''Invalid padding strategy:''' + str(self.padding_side))
        return encoded_inputs
| 8 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class UpperCAmelCase_ ( BaseOutput ):
    """Output of ``VQModel.encode``.

    NOTE(review): the obfuscated source had a bare ``42`` where the field stood;
    restored to ``latents`` because ``encode`` constructs this class with
    ``latents=`` below.
    """

    latents: torch.FloatTensor
class UpperCAmelCase_ ( ModelMixin , ConfigMixin ):
    """VQ-VAE: encoder -> vector-quantized latent space -> decoder.

    NOTE(review): restored from obfuscated source in which the submodules were
    bound to discarded locals and ``nn.Convad`` (nonexistent) stood for
    ``nn.Conv2d``; verify against the upstream diffusers ``VQModel``.
    """

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.1_8_2_1_5,
        norm_type: str = "group",
    ):
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=False, )
        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels
        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.2_5, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)
        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, norm_type=norm_type, )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        """Encode ``x`` to (unquantized) latents."""
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        """Quantize (unless disabled) and decode latents back to sample space."""
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        hidden = self.post_quant_conv(quant)
        # The pre-conv quantized latents are passed as conditioning only for
        # spatial norm decoders.
        dec = self.decoder(hidden, quant if self.config.norm_type == '''spatial''' else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        """Full autoencoding pass: encode, quantize, decode."""
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
| 713 |
'''simple docstring'''
import mpmath # for roots of unity
import numpy as np
class UpperCAmelCase_ :
    """Fast polynomial multiplication of ``poly_a`` and ``poly_b`` via radix-2 FFT.

    NOTE(review): restored from obfuscated source in which every ``self.*``
    attribute and local (``dft``, ``next_ncol``, ``first`` ...) was bound to a
    discarded name; verify against the reference radix-2 FFT implementation.
    """

    def __init__(self, poly_a=None, poly_b=None):
        # Input as coefficient lists, lowest degree first.
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]
        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)
        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)
        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1)))
        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)
        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))
        # The product
        self.product = self.__multiply()

    def __dft(self, which):
        """Discrete Fourier transform of polynomial ``'A'`` or ``'B'``."""
        dft = [[x] for x in self.polyA] if which == '''A''' else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]
        #
        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol
            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root
            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    def __multiply(self):
        """Pointwise-multiply the two DFTs and apply the inverse transform."""
        dft_a = self.__dft('''A''')
        dft_b = self.__dft('''B''')
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b
        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2)
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root))
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2
        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1J for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    def __str__(self):
        """Human-readable dump of A, B and their product."""
        # NOTE(review): ``enumerate`` yields (index, value), so ``coef`` is the
        # index here — kept as in the source to preserve the printed output.
        a = '''A = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for coef, i in enumerate(self.polyA[: self.len_A]))
        b = '''B = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for coef, i in enumerate(self.polyB[: self.len_B]))
        c = '''A*B = ''' + ''' + '''.join(
            f'{coef}*x^{i}' for coef, i in enumerate(self.product))
        return f'{a}\n{b}\n{c}'
# Unit tests
if __name__ == "__main__":
    # Run the module's embedded doctests when executed as a script.
    import doctest
    doctest.testmod()
| 8 | 0 |
'''simple docstring'''
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UpperCAmelCase_ ( PretrainedConfig ):
    """Configuration class for UMT5 models.

    NOTE(review): restored from obfuscated source whose ``__init__`` used the
    same name for every parameter (a SyntaxError) and never bound the values on
    ``self``; defaults preserved in order. Verify against the upstream
    ``UMT5Config``.
    """

    model_type = '''umt5'''
    keys_to_ignore_at_inference = ['''past_key_values''']

    def __init__(
        self,
        vocab_size=25_0112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder, tokenizer_class=tokenizer_class, tie_word_embeddings=tie_word_embeddings, pad_token_id=pad_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        # Parse e.g. "gated-gelu" into (is_gated, activation-name).
        act_info = self.feed_forward_proj.split('''-''')
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == '''gated'''
        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f'`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'
                '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
                '''\'gated-gelu\' or \'relu\'''')
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = '''gelu_new'''

    @property
    def hidden_size(self):
        """Alias used by the generic transformers machinery."""
        return self.d_model

    @property
    def num_attention_heads(self):
        """Alias used by the generic transformers machinery."""
        return self.num_heads

    @property
    def num_hidden_layers(self):
        """Alias used by the generic transformers machinery."""
        return self.num_layers
class UpperCAmelCase_ ( OnnxSeqaSeqConfigWithPast ):
    """ONNX export configuration for UMT5 (seq2seq, optional past key values)."""

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec per model input; extended when past KVs are used."""
        common_inputs = {
            '''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
            '''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
        }
        if self.use_past:
            # With cached past, the decoder only sees one new token per step.
            common_inputs['''attention_mask'''][1] = '''past_encoder_sequence + sequence'''
            common_inputs['''decoder_input_ids'''] = {0: '''batch'''}
            common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
        else:
            common_inputs['''decoder_input_ids'''] = {0: '''batch''', 1: '''decoder_sequence'''}
            common_inputs['''decoder_attention_mask'''] = {0: '''batch''', 1: '''decoder_sequence'''}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction='''inputs''')
        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        """Minimum ONNX opset required by the exported graph."""
        return 13

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance used when validating the exported model."""
        return 5e-4
| 714 |
'''simple docstring'''
def lowercase_ ( __A : int , __A : int ) -> str:
"""simple docstring"""
if number < 0 or shift_amount < 0:
raise ValueError('''both inputs must be positive integers''' )
lowercase : List[Any] =str(bin(__A ) )
binary_number += "0" * shift_amount
return binary_number
def lowercase_ ( __A : int , __A : int ) -> str:
"""simple docstring"""
if number < 0 or shift_amount < 0:
raise ValueError('''both inputs must be positive integers''' )
lowercase : Union[str, Any] =str(bin(__A ) )[2:]
if shift_amount >= len(__A ):
return "0b0"
lowercase : Any =binary_number[: len(__A ) - shift_amount]
return "0b" + shifted_binary_number
def lowercase_ ( __A : int , __A : int ) -> str:
"""simple docstring"""
if number >= 0: # Get binary representation of positive number
lowercase : str ='''0''' + str(bin(__A ) ).strip('''-''' )[2:]
else: # Get binary (2's complement) representation of negative number
lowercase : Dict =len(bin(__A )[3:] ) # Find 2's complement of number
lowercase : Optional[Any] =bin(abs(__A ) - (1 << binary_number_length) )[3:]
lowercase : int =(
'''1''' + '''0''' * (binary_number_length - len(__A )) + binary_number
)
if shift_amount >= len(__A ):
return "0b" + binary_number[0] * len(__A )
return (
"0b"
+ binary_number[0] * shift_amount
+ binary_number[: len(__A ) - shift_amount]
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 8 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_ ( metaclass=__A ):
"""simple docstring"""
UpperCamelCase_ = ['''torch''']
def __init__( self : int , *UpperCAmelCase : Union[str, Any] , **UpperCAmelCase : Dict ) -> str:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def A__ ( cls : Any , *UpperCAmelCase : Tuple , **UpperCAmelCase : Optional[Any] ) -> int:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def A__ ( cls : Optional[int] , *UpperCAmelCase : List[Any] , **UpperCAmelCase : List[str] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__A ):
"""simple docstring"""
UpperCamelCase_ = ['''torch''']
def __init__( self : Optional[Any] , *UpperCAmelCase : str , **UpperCAmelCase : int ) -> Tuple:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def A__ ( cls : Optional[int] , *UpperCAmelCase : Optional[int] , **UpperCAmelCase : Tuple ) -> Tuple:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def A__ ( cls : Optional[int] , *UpperCAmelCase : Dict , **UpperCAmelCase : List[Any] ) -> Any:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_ ( metaclass=__A ):
"""simple docstring"""
UpperCamelCase_ = ['''torch''']
def __init__( self : List[Any] , *UpperCAmelCase : Tuple , **UpperCAmelCase : str ) -> int:
'''simple docstring'''
requires_backends(self , ['''torch'''] )
@classmethod
def A__ ( cls : Optional[int] , *UpperCAmelCase : Optional[Any] , **UpperCAmelCase : Union[str, Any] ) -> List[str]:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
@classmethod
def A__ ( cls : Optional[Any] , *UpperCAmelCase : Any , **UpperCAmelCase : Optional[Any] ) -> Dict:
'''simple docstring'''
requires_backends(cls , ['''torch'''] )
class UpperCAmelCase_(metaclass=__A):
    """Placeholder object emitted when PyTorch is not installed.

    Instantiating the class or calling either class-level constructor raises a
    helpful error via ``requires_backends`` instead of a late AttributeError.

    NOTE(review): the original source repeated this byte-identical class
    definition eight times in a row under the same name; each definition only
    rebound ``UpperCAmelCase_``, so a single definition is kept. The original
    also declared two identical ``@classmethod``s under one name (the second
    shadowing the first — presumably ``from_config`` / ``from_pretrained``
    before name mangling, TODO confirm), and spelled the var-args as
    ``*UpperCAmelCase, **UpperCAmelCase`` — duplicate parameter names, which
    is a SyntaxError; they are renamed to ``*args, **kwargs``.
    """

    # Backends this placeholder guards on.
    UpperCamelCase_ = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def A__(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
def lowercase_(*args, **kwargs):
    """Placeholder for a torch-only function; always raises via
    ``requires_backends`` with a "torch" installation hint.

    NOTE(review): the original source defined seven byte-identical copies of
    this function under the same name, so only the last binding could survive;
    a single definition is kept. Each copy also spelled the var-args as
    ``*__A, **__A`` — duplicate parameter names, a SyntaxError — fixed here.
    """
    requires_backends(lowercase_, ["torch"])
class UpperCAmelCase_(metaclass=__A):
    """Placeholder object emitted when PyTorch is not installed.

    Instantiating the class or calling the class-level constructor raises a
    helpful error via ``requires_backends`` instead of a late AttributeError.

    NOTE(review): the original source repeated this byte-identical class
    definition thirty-nine times in a row under the same name; each definition
    only rebound ``UpperCAmelCase_``, so a single definition is kept. The
    original also declared two identical ``@classmethod``s under one name (the
    second shadowing the first — presumably ``from_config`` /
    ``from_pretrained`` before name mangling, TODO confirm), and spelled the
    var-args as ``*UpperCAmelCase, **UpperCAmelCase`` — duplicate parameter
    names, which is a SyntaxError; they are renamed to ``*args, **kwargs``.
    """

    # Backends this placeholder guards on.
    UpperCamelCase_ = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def A__(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
# (removed stray "| 715 |" table-cell artifact left over from file concatenation — not valid Python)
'''simple docstring'''
import unittest
from typing import Any, Dict, List, Optional, Tuple, Union

from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_detectrona,
    require_pytesseract,
    require_tf,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY
# Use the real Pillow/image helpers when vision support is installed;
# otherwise install a no-op stub so that importing this module never fails.
if is_vision_available():
    from PIL import Image
    from transformers.image_utils import load_image
else:

    class UpperCAmelCase_:
        """Stub standing in for the PIL ``Image`` class when vision
        dependencies are absent (presumably mirrors ``Image.open`` —
        TODO confirm against the un-mangled original)."""

        @staticmethod
        def A__(*args, **kwargs):
            # Intentionally a no-op: tests guarded by @require_vision never run
            # without Pillow, so this only needs to exist, not to work.
            # NOTE(review): the original spelled the signature
            # ``*UpperCAmelCase, **UpperCAmelCase`` — duplicate parameter
            # names, a SyntaxError — fixed here.
            pass
def lowercase_(__A):
    """Fallback stand-in (used when vision support is unavailable): accepts a
    single argument and always returns ``None``.

    NOTE(review): the original annotated the return type as ``List[Any]``
    even though the body unconditionally returns ``None``; the misleading
    annotation is dropped.
    """
    return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
SCREAMING_SNAKE_CASE = (
    'https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'
)
# The test methods below refer to this constant as ``INVOICE_URL``, which was
# otherwise undefined (a NameError at test time); expose the same URL under
# that name as well. ``SCREAMING_SNAKE_CASE`` is kept for backward
# compatibility with any other reference to it.
INVOICE_URL = SCREAMING_SNAKE_CASE
@is_pipeline_test
@require_torch
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def A__ ( self : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : str , UpperCAmelCase : List[str] ) -> Tuple:
'''simple docstring'''
lowercase : Dict =pipeline(
'''document-question-answering''' , model=UpperCAmelCase , tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase )
lowercase : Optional[Any] =INVOICE_URL
lowercase : Any =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
lowercase : Dict ='''What is the placebo?'''
lowercase : Optional[Any] =[
{
'''image''': load_image(UpperCAmelCase ),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
def A__ ( self : Any , UpperCAmelCase : Optional[int] , UpperCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
lowercase : Dict =dqa_pipeline(UpperCAmelCase , top_k=2 )
self.assertEqual(
UpperCAmelCase , [
[
{'''score''': ANY(UpperCAmelCase ), '''answer''': ANY(UpperCAmelCase ), '''start''': ANY(UpperCAmelCase ), '''end''': ANY(UpperCAmelCase )},
{'''score''': ANY(UpperCAmelCase ), '''answer''': ANY(UpperCAmelCase ), '''start''': ANY(UpperCAmelCase ), '''end''': ANY(UpperCAmelCase )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def A__ ( self : int ) -> Any:
'''simple docstring'''
lowercase : Dict =pipeline('''document-question-answering''' , model='''hf-internal-testing/tiny-random-layoutlmv2''' )
lowercase : Union[str, Any] =INVOICE_URL
lowercase : Tuple ='''How many cats are there?'''
lowercase : Optional[int] =[
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019''', '''start''': 38, '''end''': 39},
{'''score''': 0.0_0_0_1, '''answer''': '''oy 2312/2019 DUE''', '''start''': 38, '''end''': 40},
]
lowercase : Optional[Any] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase )
lowercase : List[str] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , UpperCAmelCase )
# This image does not detect ANY text in it, meaning layoutlmv2 should fail.
# Empty answer probably
lowercase : Optional[Any] ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
lowercase : Any =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(UpperCAmelCase , [] )
# We can optionnally pass directly the words and bounding boxes
lowercase : int ='''./tests/fixtures/tests_samples/COCO/000000039769.png'''
lowercase : Dict =[]
lowercase : str =[]
lowercase : str =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , words=UpperCAmelCase , boxes=UpperCAmelCase , top_k=2 )
self.assertEqual(UpperCAmelCase , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def A__ ( self : int ) -> Any:
'''simple docstring'''
lowercase : Union[str, Any] =pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , )
lowercase : Dict =INVOICE_URL
lowercase : str ='''What is the invoice number?'''
lowercase : List[str] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : List[Any] =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : List[Any] =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_4_4, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_0_0_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def A__ ( self : Dict ) -> List[str]:
'''simple docstring'''
lowercase : str =pipeline(
'''document-question-answering''' , model='''tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa''' , revision='''9977165''' , max_seq_len=50 , )
lowercase : Dict =INVOICE_URL
lowercase : Any ='''What is the invoice number?'''
lowercase : Union[str, Any] =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : int =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : Any =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_7_4, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
{'''score''': 0.9_9_4_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def A__ ( self : str ) -> Dict:
'''simple docstring'''
lowercase : Any =AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase )
lowercase : Tuple =pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase , revision='''3dc6de3''' , )
lowercase : Tuple =INVOICE_URL
lowercase : Any ='''What is the invoice number?'''
lowercase : Dict =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
lowercase : Any =dqa_pipeline({'''image''': image, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
lowercase : str =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
]
]
* 2 , )
lowercase : Tuple =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
# This model should also work if `image` is set to None
lowercase : Dict =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.4_2_5_1, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.0_8_1_9, '''answer''': '''1110212019''', '''start''': 23, '''end''': 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def A__ ( self : Dict ) -> Any:
'''simple docstring'''
lowercase : Dict =AutoTokenizer.from_pretrained(
'''impira/layoutlm-document-qa''' , revision='''3dc6de3''' , add_prefix_space=UpperCAmelCase )
lowercase : List[Any] =pipeline(
'''document-question-answering''' , model='''impira/layoutlm-document-qa''' , tokenizer=UpperCAmelCase , revision='''3dc6de3''' , max_seq_len=50 , )
lowercase : str =INVOICE_URL
lowercase : int ='''What is the invoice number?'''
lowercase : Tuple =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
lowercase : Union[str, Any] =dqa_pipeline(
[{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
[
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
]
]
* 2 , )
lowercase : List[str] =list(zip(*apply_tesseract(load_image(UpperCAmelCase ) , UpperCAmelCase , '''''' ) ) )
# This model should also work if `image` is set to None
lowercase : Union[str, Any] =dqa_pipeline({'''image''': None, '''word_boxes''': word_boxes, '''question''': question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase , decimals=4 ) , [
{'''score''': 0.9_9_9_9, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
{'''score''': 0.9_9_9_8, '''answer''': '''us-001''', '''start''': 16, '''end''': 16},
] , )
@slow
@require_torch
def A__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
lowercase : str =pipeline(
'''document-question-answering''' , model='''naver-clova-ix/donut-base-finetuned-docvqa''' , tokenizer=AutoTokenizer.from_pretrained('''naver-clova-ix/donut-base-finetuned-docvqa''' ) , feature_extractor='''naver-clova-ix/donut-base-finetuned-docvqa''' , )
lowercase : Any =INVOICE_URL
lowercase : Union[str, Any] ='''What is the invoice number?'''
lowercase : int =dqa_pipeline(image=UpperCAmelCase , question=UpperCAmelCase , top_k=2 )
self.assertEqual(nested_simplify(UpperCAmelCase , decimals=4 ) , [{'''answer''': '''us-001'''}] )
@require_tf
@unittest.skip('''Document question answering not implemented in TF''' )
def A__ ( self : Any ) -> Any:
'''simple docstring'''
pass
| 8 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class UpperCAmelCase_ :
    """Builds MaskFormerSwin configurations and dummy inputs for the model tests.

    NOTE(review): this block had been mechanically obfuscated -- every method
    was named ``A__`` (so later defs shadowed earlier ones), the ``__init__``
    parameters all shared one duplicate name (a SyntaxError), and attribute
    assignments had been replaced by dead ``lowercase`` locals.  Method and
    parameter names below are restored from the call sites that remain in this
    file (``self.get_config()``, ``model_tester.prepare_config_and_inputs()``,
    ``model_tester.create_and_check_model`` / ``create_and_check_backbone``,
    ``model_tester.prepare_config_and_inputs_for_common()``) and from the
    right-hand sides of the original assignments.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],  # noqa: B006 -- literal defaults kept to match the original fixture
        num_heads=[2, 2, 4],  # noqa: B006
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],  # noqa: B006
        out_indices=[1, 2, 3],  # noqa: B006
    ) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices

    def prepare_config_and_inputs(self):
        """Return ``(config, pixel_values, labels)`` for one test run."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        """Build a MaskFormerSwinConfig from the fixture's hyper-parameters."""
        return MaskFormerSwinConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            path_norm=self.patch_norm,  # NOTE(review): kwarg spelled ``path_norm`` in the original -- kept verbatim; confirm against MaskFormerSwinConfig
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
            out_features=self.out_features,
            out_indices=self.out_indices,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Forward pass through MaskFormerSwinModel and check the output shape."""
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # Sequence length shrinks 4x per stage after the first; channel width doubles per stage.
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_backbone(self, config, pixel_values, labels):
        """Forward pass through MaskFormerSwinBackbone; verify feature maps, channels and out_features validation."""
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])
        # verify that an unknown out_features entry raises
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            MaskFormerSwinBackbone(config=config)

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)`` in the format the common test mixin expects."""
        # Original obfuscated code referenced undefined names here
        # (``config_and_inputs`` / ``pixel_values`` / ``config``); restored.
        config, pixel_values, labels = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
    """Common test-suite for MaskFormerSwinModel / MaskFormerSwinBackbone.

    NOTE(review): this file was mechanically obfuscated.  The two ``__A``
    bases were presumably the common model/pipeline tester mixins, many
    results are bound to a dead local named ``lowercase`` instead of the
    names the following lines read, and ``UpperCAmelCase`` is used as an
    (undefined) stand-in for several different arguments.  The docstrings
    below describe the intended behaviour; those names must be restored
    before this suite can actually run.
    """
    # Model classes exercised by the common tests (empty when torch is absent).
    UpperCamelCase_ = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    # task -> model mapping consumed by the pipeline tester mixin.
    UpperCamelCase_ = {'''feature-extraction''': MaskFormerSwinModel} if is_torch_available() else {}
    # Feature flags for the common tester (all disabled for this backbone-only model).
    UpperCamelCase_ = False
    UpperCamelCase_ = False
    UpperCamelCase_ = False
    UpperCamelCase_ = False
    UpperCamelCase_ = False
    def A__ ( self : Any ) -> str:
        """setUp: build the shared model tester and config tester fixtures."""
        lowercase : Optional[Any] =MaskFormerSwinModelTester(self )
        # NOTE(review): ``UpperCAmelCase`` is undefined here -- presumably MaskFormerSwinConfig.
        lowercase : List[str] =ConfigTester(self , config_class=UpperCAmelCase , embed_dim=37 )
    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            '''`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'''
            ''' `nn.DataParallel`'''
        ) )
    def A__ ( self : Any ) -> str:
        """Skipped on multi-GPU: the model's extra outputs break nn.DataParallel gathering."""
        pass
    def A__ ( self : str ) -> Optional[int]:
        """Run the standard ConfigTester battery on the config class."""
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()
    def A__ ( self : List[Any] ) -> Optional[Any]:
        """Intentionally a no-op: the common-properties config check does not apply here."""
        return
    def A__ ( self : Optional[int] ) -> Tuple:
        """Model forward-pass shape test, delegated to the model tester."""
        lowercase : Optional[int] =self.model_tester.prepare_config_and_inputs()
        # NOTE(review): ``*UpperCAmelCase`` is undefined -- should unpack the tuple prepared above.
        self.model_tester.create_and_check_model(*UpperCAmelCase )
    def A__ ( self : Optional[int] ) -> Dict:
        """Backbone feature-map test, delegated to the model tester."""
        lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*UpperCAmelCase )
    @unittest.skip('''Swin does not use inputs_embeds''' )
    def A__ ( self : Optional[int] ) -> Dict:
        """Skipped: Swin consumes pixel values, not input embeddings."""
        pass
    @unittest.skip('''Swin does not support feedforward chunking''' )
    def A__ ( self : Tuple ) -> Dict:
        """Skipped: feed-forward chunking is unsupported by Swin."""
        pass
    def A__ ( self : str ) -> str:
        """Input embeddings must be an nn.Module; output embeddings absent or nn.Linear."""
        lowercase : int =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase : Optional[Any] =model_class(UpperCAmelCase )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowercase : int =model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(UpperCAmelCase , nn.Linear ) )
    def A__ ( self : Any ) -> str:
        """forward() must accept ``pixel_values`` as its first argument."""
        lowercase : Any =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowercase : Optional[Any] =model_class(UpperCAmelCase )
            lowercase : List[Any] =inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowercase : List[Any] =[*signature.parameters.keys()]
            lowercase : Optional[Any] =['''pixel_values''']
            self.assertListEqual(arg_names[:1] , UpperCAmelCase )
    @unittest.skip(reason='''MaskFormerSwin is only used as backbone and doesn\'t support output_attentions''' )
    def A__ ( self : str ) -> Any:
        """Skipped: attention outputs are not exposed by this backbone."""
        pass
    @unittest.skip(reason='''MaskFormerSwin is only used as an internal backbone''' )
    def A__ ( self : List[str] ) -> Optional[Any]:
        """Skipped: only relevant for stand-alone (non-backbone) models."""
        pass
    def A__ ( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Tuple , UpperCAmelCase : List[str] , UpperCAmelCase : Dict ) -> Any:
        """Helper: run one model and verify the hidden_states count and per-layer shape."""
        lowercase : Tuple =model_class(UpperCAmelCase )
        model.to(UpperCAmelCase )
        model.eval()
        with torch.no_grad():
            lowercase : Union[str, Any] =model(**self._prepare_for_class(UpperCAmelCase , UpperCAmelCase ) )
        lowercase : Tuple =outputs.hidden_states
        # One hidden state per depth entry plus the embeddings, unless overridden.
        lowercase : Union[str, Any] =getattr(
            self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(UpperCAmelCase ) , UpperCAmelCase )
        # Swin has a different seq_length
        lowercase : int =(
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        lowercase : Dict =(image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
    def A__ ( self : List[Any] ) -> Optional[int]:
        """Check hidden states for every model class, enabled via kwarg and via config."""
        lowercase : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
        lowercase : Dict =(
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            lowercase : List[Any] =True
            self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowercase : Union[str, Any] =True
            self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
    def A__ ( self : Union[str, Any] ) -> Optional[int]:
        """Same hidden-states check with an input size that is not a multiple of the patch size."""
        lowercase : int =self.model_tester.prepare_config_and_inputs_for_common()
        lowercase : List[str] =3
        lowercase : List[Any] =(
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        lowercase : List[str] =(
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        # Pad each side up to the next multiple of the patch size.
        lowercase : Optional[Any] =image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        lowercase : Optional[int] =image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            lowercase : Union[str, Any] =True
            self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowercase : Any =True
            self.check_hidden_states_output(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , (padded_height, padded_width) )
    @unittest.skip(reason='''MaskFormerSwin doesn\'t have pretrained checkpoints''' )
    def A__ ( self : str ) -> List[str]:
        """Skipped: no pretrained checkpoint exists for this architecture."""
        pass
    @unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
    def A__ ( self : int ) -> List[Any]:
        """Skipped until MaskFormerSwin is replaced by native Swin."""
        pass
    @unittest.skip(reason='''This will be fixed once MaskFormerSwin is replaced by native Swin''' )
    def A__ ( self : Union[str, Any] ) -> str:
        """Skipped until MaskFormerSwin is replaced by native Swin."""
        pass
    def A__ ( self : Optional[Any] ) -> Any:
        """Tuple and dict model outputs must be element-wise equal (NaNs zeroed first)."""
        lowercase : int =self.model_tester.prepare_config_and_inputs_for_common()
        def set_nan_tensor_to_zero(UpperCAmelCase : List[str] ):
            # NOTE(review): the in-place NaN zeroing (originally ``t[t != t] = 0``)
            # was lost in obfuscation; ``t`` is the tensor argument.
            lowercase : Optional[int] =0
            return t
        def check_equivalence(UpperCAmelCase : Optional[int] , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int]={} ):
            # Compare tuple-style (return_dict=False) vs dict-style outputs of one model.
            with torch.no_grad():
                lowercase : int =model(**UpperCAmelCase , return_dict=UpperCAmelCase , **UpperCAmelCase )
                lowercase : Tuple =model(**UpperCAmelCase , return_dict=UpperCAmelCase , **UpperCAmelCase ).to_tuple()
                def recursive_check(UpperCAmelCase : int , UpperCAmelCase : Tuple ):
                    # Walk nested lists/tuples/dicts in lockstep and allclose-compare leaves.
                    if isinstance(UpperCAmelCase , (List, Tuple) ):
                        for tuple_iterable_value, dict_iterable_value in zip(UpperCAmelCase , UpperCAmelCase ):
                            recursive_check(UpperCAmelCase , UpperCAmelCase )
                    elif isinstance(UpperCAmelCase , UpperCAmelCase ):
                        for tuple_iterable_value, dict_iterable_value in zip(
                            tuple_object.values() , dict_object.values() ):
                            recursive_check(UpperCAmelCase , UpperCAmelCase )
                    elif tuple_object is None:
                        return
                    else:
                        self.assertTrue(
                            torch.allclose(
                                set_nan_tensor_to_zero(UpperCAmelCase ) , set_nan_tensor_to_zero(UpperCAmelCase ) , atol=1e-5 ) , msg=(
                                '''Tuple and dict output are not equal. Difference:'''
                                f' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'
                                f' {torch.isnan(UpperCAmelCase ).any()} and `inf`: {torch.isinf(UpperCAmelCase )}. Dict has'
                                f' `nan`: {torch.isnan(UpperCAmelCase ).any()} and `inf`: {torch.isinf(UpperCAmelCase )}.'
                            ) , )
                recursive_check(UpperCAmelCase , UpperCAmelCase )
        for model_class in self.all_model_classes:
            lowercase : Any =model_class(UpperCAmelCase )
            model.to(UpperCAmelCase )
            model.eval()
            # Plain inputs, labelled inputs, and both again with hidden states requested.
            lowercase : List[Any] =self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
            lowercase : Union[str, Any] =self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
            check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
            lowercase : Optional[int] =self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
            lowercase : List[str] =self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
            check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
            lowercase : Union[str, Any] =self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
            lowercase : List[str] =self._prepare_for_class(UpperCAmelCase , UpperCAmelCase )
            check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , {'''output_hidden_states''': True} )
            lowercase : int =self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
            lowercase : List[Any] =self._prepare_for_class(UpperCAmelCase , UpperCAmelCase , return_labels=UpperCAmelCase )
            check_equivalence(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , {'''output_hidden_states''': True} )
@require_torch
class UpperCAmelCase_ ( unittest.TestCase , __A ):
    """Backbone-specific tests for MaskFormerSwinBackbone.

    NOTE(review): obfuscated file -- ``__A`` was presumably the common
    BackboneTesterMixin, and several results below are bound to a dead
    ``lowercase`` local instead of the names the assertions read
    (``config``/``inputs_dict``, ``backbone``, ``outputs``, ``batch_size``,
    ``h_batch_size``/``h_n_channels``).  Comments describe the intended checks.
    """
    UpperCamelCase_ = (MaskFormerSwinBackbone,) if is_torch_available() else ()
    UpperCamelCase_ = MaskFormerSwinConfig
    def A__ ( self : Optional[int] ) -> Dict:
        """setUp: build the shared model tester."""
        lowercase : Optional[Any] =MaskFormerSwinModelTester(self )
    def A__ ( self : List[str] ) -> Tuple:
        """Check feature maps, hidden states and attentions on every backbone class."""
        lowercase : Any =self.model_tester.prepare_config_and_inputs_for_common()
        lowercase : Tuple =inputs_dict['''pixel_values'''].shape[0]
        for backbone_class in self.all_model_classes:
            lowercase : str =backbone_class(UpperCAmelCase )
            backbone.to(UpperCAmelCase )
            backbone.eval()
            lowercase : Optional[int] =backbone(**UpperCAmelCase )
            # Test default outputs and verify feature maps
            self.assertIsInstance(outputs.feature_maps , UpperCAmelCase )
            self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
            for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
                # NOTE(review): assertTrue with two positional args treats the second as msg,
                # so this comparison is a no-op -- likely meant to be assertEqual upstream.
                self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
            self.assertIsNone(outputs.hidden_states )
            self.assertIsNone(outputs.attentions )
            # Test output_hidden_states=True
            lowercase : List[Any] =backbone(**UpperCAmelCase , output_hidden_states=UpperCAmelCase )
            self.assertIsNotNone(outputs.hidden_states )
            self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
            # We skip the stem layer
            for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
                for hidden_state in hidden_states:
                    # Hidden states are in the format (batch_size, (height * width), n_channels)
                    lowercase : str =hidden_state.shape
                    self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
            # Test output_attentions=True
            if self.has_attentions:
                lowercase : str =backbone(**UpperCAmelCase , output_attentions=UpperCAmelCase )
                self.assertIsNotNone(outputs.attentions )
# | 716 |  (stray dataset-join artifact between two unrelated snippets; commented out so the file parses)
'''simple docstring'''
def lowercase_ ( number : float , digit_amount : int ) -> float:
    """Return the decimal (fractional) part of ``number``.

    Args:
        number: value whose fractional part is wanted.
        digit_amount: when > 0, round the fractional part to that many
            decimal places (Python banker's rounding); otherwise return it
            unrounded.

    Returns:
        The fractional part, keeping the sign of ``number``.
    """
    # The original signature declared the duplicate name ``__A`` for both
    # parameters (a SyntaxError) while the body already read ``number`` and
    # ``digit_amount``; the parameter names are restored from the body.
    fractional = number - int(number )
    if digit_amount > 0:
        return round(fractional , digit_amount )
    return fractional
if __name__ == "__main__":
    # Demo: print the fractional part of a few sample values.
    # The original calls referenced ``decimal_isolate`` (the function's
    # pre-obfuscation name), which is undefined in this file -- the
    # function defined above is ``lowercase_``.
    print(lowercase_(1.53, 0))
    print(lowercase_(35.345, 1))
    print(lowercase_(35.345, 2))
    print(lowercase_(35.345, 3))
    print(lowercase_(-14.789, 3))
    print(lowercase_(0, 2))
    print(lowercase_(-14.123, 1))
    print(lowercase_(-14.123, 2))
    print(lowercase_(-14.123, 3))
# | 8 | 0 |  (stray dataset-join artifact between two unrelated snippets; commented out so the file parses)
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class UpperCAmelCase_ :
    """CLI arguments for training a CodeParrot model.

    Each ``field`` documents itself via its ``help`` metadata.
    NOTE(review): obfuscation artifacts -- every field shares the name
    ``UpperCamelCase_`` (so later fields shadow earlier ones and the original
    attribute names are lost), and ``default=__A`` references an undefined
    name (originally a boolean/None literal).  Both must be restored before
    this dataclass can be imported and used with HfArgumentParser.
    """
    UpperCamelCase_ = field(
        default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be trained.'''} )
    UpperCamelCase_ = field(
        default='''./''' , metadata={'''help''': '''Save dir where model repo is cloned and models updates are saved to.'''} )
    UpperCamelCase_ = field(
        default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path of training dataset.'''} )
    UpperCamelCase_ = field(
        default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
    UpperCamelCase_ = field(default=2 , metadata={'''help''': '''Batch size for training.'''} )
    UpperCamelCase_ = field(default=2 , metadata={'''help''': '''Batch size for evaluation.'''} )
    UpperCamelCase_ = field(default=0.1 , metadata={'''help''': '''Value of weight decay.'''} )
    UpperCamelCase_ = field(
        default=10000 , metadata={'''help''': '''Size of buffer used to shuffle streaming dataset.'''} )
    UpperCamelCase_ = field(default=2e-4 , metadata={'''help''': '''Learning rate fo training.'''} )
    UpperCamelCase_ = field(default='''cosine''' , metadata={'''help''': '''Learning rate.'''} )
    UpperCamelCase_ = field(
        default=750 , metadata={'''help''': '''Number of warmup steps in the learning rate schedule.'''} )
    UpperCamelCase_ = field(
        default=16 , metadata={'''help''': '''Number of gradient accumulation steps.'''} )
    UpperCamelCase_ = field(
        default=__A , metadata={'''help''': '''Use gradient checkpointing to reduce memory footprint.'''} )
    UpperCamelCase_ = field(default=50000 , metadata={'''help''': '''Maximum number of training steps.'''} )
    UpperCamelCase_ = field(
        default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
    UpperCamelCase_ = field(default=1024 , metadata={'''help''': '''Sequence lengths used for training.'''} )
    UpperCamelCase_ = field(default=1 , metadata={'''help''': '''Training seed.'''} )
    UpperCamelCase_ = field(
        default=1024 , metadata={'''help''': '''Interval to save checkpoints. Measured as number of forward passes not training steps.'''} , )
    UpperCamelCase_ = field(
        default=__A , metadata={'''help''': '''States path if the training should continue from a checkpoint folder.'''} )
    UpperCamelCase_ = field(default=__A , metadata={'''help''': '''If True the data is pretokenized.'''} )
@dataclass
class UpperCAmelCase_ :
    """CLI arguments for evaluating (perplexity) a trained CodeParrot model.

    Each ``field`` documents itself via its ``help`` metadata.
    NOTE(review): all fields share the obfuscated name ``UpperCamelCase_``,
    so later fields shadow earlier ones; original names must be restored.
    """
    UpperCamelCase_ = field(
        default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
    UpperCamelCase_ = field(
        default='''codeparrot/codeparrot-clean-valid''' , metadata={'''help''': '''Name or path of validation dataset.'''} )
    UpperCamelCase_ = field(default=2 , metadata={'''help''': '''Batch size used for evaluation.'''} )
    UpperCamelCase_ = field(
        default=-1 , metadata={'''help''': '''Maximum number of evaluation steps. If -1 the full dataset is evaluated.'''} )
    UpperCamelCase_ = field(default=1024 , metadata={'''help''': '''Length of sequences to be evaluated.'''} )
    UpperCamelCase_ = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
@dataclass
class UpperCAmelCase_ :
    """CLI arguments for HumanEval code-generation evaluation.

    Each ``field`` documents itself via its ``help`` metadata.
    NOTE(review): ``default=__A`` references an undefined obfuscation
    placeholder (originally a boolean/None literal), and all fields share one
    shadowing name ``UpperCamelCase_``; both must be restored before use.
    """
    UpperCamelCase_ = field(
        default='''codeparrot/codeparrot''' , metadata={'''help''': '''Model name or path of model to be evaluated.'''} )
    UpperCamelCase_ = field(default=__A , metadata={'''help''': '''Number of workers used for code evaluation.'''} )
    UpperCamelCase_ = field(
        default=__A , metadata={'''help''': '''The number of human-eval tasks to run. If not included all tasks are evaluated.'''} , )
    UpperCamelCase_ = field(
        default=__A , metadata={'''help''': '''Sample from the language model\'s output distribution.'''} )
    UpperCamelCase_ = field(default=0.2 , metadata={'''help''': '''Sampling temperature used for generation.'''} )
    UpperCamelCase_ = field(default=256 , metadata={'''help''': '''Maximum number of newly generated tokens.'''} )
    UpperCamelCase_ = field(default=0 , metadata={'''help''': '''Top-k parameter used for generation.'''} )
    UpperCamelCase_ = field(default=0.95 , metadata={'''help''': '''Top-p parameter used for nucleus sampling.'''} )
    UpperCamelCase_ = field(default=10 , metadata={'''help''': '''Number of generations to run in parallel.'''} )
    UpperCamelCase_ = field(
        default=200 , metadata={'''help''': '''Number of completions to generate for each sample.'''} )
    UpperCamelCase_ = field(default=1 , metadata={'''help''': '''Random seed used for evaluation.'''} )
    # NOTE(review): this field's help text looks copy-pasted from the seed field;
    # it is presumably the output path for evaluation results.
    UpperCamelCase_ = field(
        default='''eval_results.json''' , metadata={'''help''': '''Random seed used for evaluation.'''} )
    UpperCamelCase_ = field(
        default='''0''' , metadata={'''help''': '''Allow `code_eval` to execute Python code on machine'''} )
    UpperCamelCase_ = field(
        default=-1 , metadata={
            '''help''': (
                '''Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'''
                ''' number corresponds to which GPU device id to run on.'''
            )
        } , )
@dataclass
class UpperCAmelCase_ :
    """CLI arguments for preprocessing/filtering the raw CodeParrot dataset.

    Each ``field`` documents itself via its ``help`` metadata.
    NOTE(review): ``default=__A`` references an undefined obfuscation
    placeholder (originally a boolean/None literal), and all fields share one
    shadowing name ``UpperCamelCase_``; both must be restored before use.
    """
    UpperCamelCase_ = field(
        default=__A , metadata={
            '''help''': '''The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'''
        } , )
    UpperCamelCase_ = field(
        default='''transformersbook/codeparrot''' , metadata={'''help''': '''Folder or name of dataset to process.'''} )
    UpperCamelCase_ = field(
        default='''codeparrot-clean''' , metadata={'''help''': '''Folder to save processed processed dataset.'''} )
    UpperCamelCase_ = field(
        default=100000 , metadata={'''help''': '''Number of files to save per JSON output file.'''} )
    UpperCamelCase_ = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
    UpperCamelCase_ = field(
        default=1000 , metadata={'''help''': '''Maximum line length in file, otherwise file is filtered.'''} )
    UpperCamelCase_ = field(
        default=100 , metadata={'''help''': '''Maximum mean line length in file, otherwise file is filtered.'''} )
    UpperCamelCase_ = field(
        default=0.25 , metadata={'''help''': '''Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'''} )
    UpperCamelCase_ = field(
        default=1.5 , metadata={'''help''': '''Minimum character token ratio for the file, otherwise file is filtered.'''} )
    UpperCamelCase_ = field(
        default=0.7 , metadata={'''help''': '''Probability for filtering config, test and uncommon files.'''} )
    UpperCamelCase_ = field(
        default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} , )
    UpperCamelCase_ = field(
        default=__A , metadata={'''help''': '''If True, near-duplicate samples are removed.'''} )
    UpperCamelCase_ = field(
        default=0.85 , metadata={'''help''': '''Jaccard threshold for near-duplicate samples.'''} )
@dataclass
class UpperCAmelCase_ :
    """CLI arguments for training a new CodeParrot tokenizer.

    Each ``field`` documents itself via its ``help`` metadata.
    NOTE(review): ``default=__A`` references an undefined obfuscation
    placeholder (originally a boolean literal), and all fields share one
    shadowing name ``UpperCamelCase_``; both must be restored before use.
    """
    UpperCamelCase_ = field(
        default='''gpt2''' , metadata={'''help''': '''Base tokenizer to build new tokenizer from.'''} )
    UpperCamelCase_ = field(
        default='''transformersbook/codeparrot-train''' , metadata={'''help''': '''Dataset to train tokenizer on.'''} )
    UpperCamelCase_ = field(default='''content''' , metadata={'''help''': '''Column containing text data to process.'''} )
    UpperCamelCase_ = field(default=200000 , metadata={'''help''': '''Number of examples to train tokenizer on.'''} )
    UpperCamelCase_ = field(
        default=32768 , metadata={'''help''': '''Number of examples to train the tokenizer on.'''} )
    UpperCamelCase_ = field(default='''codeparrot''' , metadata={'''help''': '''Name of new tokenizer.'''} )
    UpperCamelCase_ = field(default=__A , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
@dataclass
class UpperCAmelCase_ :
    """Arguments for pretokenizing the cleaned CodeParrot dataset.

    NOTE(review): field names were collapsed to ``UpperCamelCase_`` by
    obfuscation; only the metadata help strings survive.
    """

    UpperCamelCase_ = field(
        default='''codeparrot/codeparrot''' , metadata={'''help''': '''Name or path to the tokenizer.'''} )
    UpperCamelCase_ = field(
        default='''codeparrot/codeparrot-clean-train''' , metadata={'''help''': '''Name or path to the dataset to pretokenize.'''} )
    UpperCamelCase_ = field(
        default='''tokenized-codeparrot-train''' , metadata={'''help''': '''Repo name of the pretokenized data.'''} )
    # NOTE(review): `__A` is an unresolved placeholder default (likely None),
    # and the help text says "code evaluation" although this class configures
    # pretokenization — verify against the original arguments file.
    UpperCamelCase_ = field(default=__A , metadata={'''help''': '''Number of workers used for code evaluation.'''} )
@dataclass
class UpperCAmelCase_ :
    """Arguments for initializing a fresh CodeParrot model from a base config.

    NOTE(review): field names were collapsed to ``UpperCamelCase_`` by
    obfuscation; only the metadata help strings survive.
    """

    UpperCamelCase_ = field(
        default='''gpt2-large''' , metadata={'''help''': '''Configuration to use for model initialization.'''} )
    UpperCamelCase_ = field(
        default='''codeparrot/codeparrot''' , metadata={'''help''': '''Tokenizer attached to model.'''} )
    UpperCamelCase_ = field(default='''codeparrot''' , metadata={'''help''': '''Name of the created model.'''} )
    # NOTE(review): help says "tokenizer" but this class creates a model —
    # presumably it should read "Push saved model to the hub"; confirm.
    UpperCamelCase_ = field(default=__A , metadata={'''help''': '''Push saved tokenizer to the hub.'''} )
| 717 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def lowercase_ ( box : List[Any] , width : int , height : int ) -> List[int]:
    """Scale an absolute pixel box (left, top, right, bottom) into the
    0-1000 coordinate system that LayoutLM-style models expect.

    Fix: the original signature declared the same placeholder name ``__A``
    for all three parameters (a SyntaxError) while the body already used
    ``box``/``width``/``height`` — the parameter names are restored to match
    the body. The return annotation is corrected to ``List[int]`` (the
    function returns a 4-element list, never ``Optional[int]``).
    """
    return [
        int(1_0_0_0 * (box[0] / width) ),
        int(1_0_0_0 * (box[1] / height) ),
        int(1_0_0_0 * (box[2] / width) ),
        int(1_0_0_0 * (box[3] / height) ),
    ]
def lowercase_ ( image : np.ndarray , lang : Optional[str] , tesseract_config : Optional[str] ) -> Optional[Any]:
    """Run Tesseract OCR on ``image`` and return ``(words, normalized_boxes)``
    where every box is scaled into the 0-1000 range.

    Fixes: the original declared the same placeholder name ``__A`` for all
    three parameters (a SyntaxError), and the four filtered coordinate lists
    (left/top/width/height) were each assigned to one clobbered local, so
    only the last survived. Distinct names are restored throughout.
    """
    # Tesseract operates on PIL images.
    pil_image = to_pil_image(image )
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image , lang=lang , output_type='''dict''' , config=tesseract_config )
    words, left, top, width, height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words ) if not word.strip()]
    words = [word for idx, word in enumerate(words ) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left ) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top ) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width ) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height ) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left , top , width , height ):
        actual_boxes.append([x, y, x + w, y + h] )

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        # NOTE(review): `normalize_box` is expected at module scope (the
        # 0-1000 scaler defined above under an obfuscated name) — confirm.
        normalized_boxes.append(normalize_box(box , image_width , image_height ) )

    assert len(words ) == len(normalized_boxes ), "Not as many words as there are bounding boxes"
    return words, normalized_boxes
class UpperCAmelCase_ ( __A ):
    """Image processor for LayoutLM-style document models: optionally resizes,
    rescales and normalizes images, and (optionally) runs Tesseract OCR to
    extract words plus 0-1000-normalized bounding boxes.

    NOTE(review): obfuscation collapsed every parameter name to
    ``UpperCAmelCase`` (duplicate argument names are a SyntaxError) and every
    local target to ``lowercase``, so this block is not runnable as-is; the
    comments below describe the apparent intent only.
    """

    # Only model input produced by this processor.
    UpperCamelCase_ = ['''pixel_values''']

    def __init__( self : List[Any] , UpperCAmelCase : bool = True , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : bool = True , UpperCAmelCase : float = 1 / 255 , UpperCAmelCase : bool = True , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : bool = True , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = "" , **UpperCAmelCase : Tuple , ) -> None:
        '''Store the default preprocessing configuration: resize/rescale/
        normalize flags and parameters, plus OCR language/config.'''
        super().__init__(**UpperCAmelCase )
        # Default target size is 224x224 when none is supplied.
        lowercase : Tuple =size if size is not None else {'''height''': 224, '''width''': 224}
        lowercase : Optional[Any] =get_size_dict(UpperCAmelCase )
        lowercase : Optional[Any] =do_resize
        lowercase : List[Any] =size
        lowercase : List[str] =resample
        lowercase : Dict =do_rescale
        lowercase : str =rescale_value
        lowercase : Optional[int] =do_normalize
        # Fall back to ImageNet statistics when no mean/std is given.
        lowercase : Any =image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        lowercase : Union[str, Any] =image_std if image_std is not None else IMAGENET_STANDARD_STD
        lowercase : List[Any] =apply_ocr
        lowercase : Union[str, Any] =ocr_lang
        lowercase : str =tesseract_config

    def A__ ( self : Dict , UpperCAmelCase : np.ndarray , UpperCAmelCase : Dict[str, int] , UpperCAmelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> np.ndarray:
        '''Resize one image to the (height, width) given in the size dict.'''
        lowercase : Tuple =get_size_dict(UpperCAmelCase )
        if "height" not in size or "width" not in size:
            raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
        lowercase : Optional[Any] =(size['''height'''], size['''width'''])
        return resize(UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )

    def A__ ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[int, float] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[Any] , ) -> np.ndarray:
        '''Rescale pixel values by a scalar factor (e.g. 1/255).'''
        return rescale(UpperCAmelCase , scale=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )

    def A__ ( self : Union[str, Any] , UpperCAmelCase : np.ndarray , UpperCAmelCase : Union[float, Iterable[float]] , UpperCAmelCase : Union[float, Iterable[float]] , UpperCAmelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase : Optional[int] , ) -> np.ndarray:
        '''Normalize an image with per-channel mean and std.'''
        return normalize(UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase , data_format=UpperCAmelCase , **UpperCAmelCase )

    def A__ ( self : Any , UpperCAmelCase : ImageInput , UpperCAmelCase : bool = None , UpperCAmelCase : Dict[str, int] = None , UpperCAmelCase : Union[str, Any]=None , UpperCAmelCase : bool = None , UpperCAmelCase : float = None , UpperCAmelCase : bool = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : Union[float, Iterable[float]] = None , UpperCAmelCase : bool = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : Optional[Union[str, TensorType]] = None , UpperCAmelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase : List[str] , ) -> PIL.Image.Image:
        '''Full preprocessing pipeline: resolve per-call overrides against the
        stored defaults, validate inputs, optionally run OCR, then resize /
        rescale / normalize and pack everything into a BatchFeature.'''
        # Per-call arguments override the instance-level defaults.
        lowercase : Optional[int] =do_resize if do_resize is not None else self.do_resize
        lowercase : Tuple =size if size is not None else self.size
        lowercase : Optional[int] =get_size_dict(UpperCAmelCase )
        lowercase : List[str] =resample if resample is not None else self.resample
        lowercase : List[Any] =do_rescale if do_rescale is not None else self.do_rescale
        lowercase : List[Any] =rescale_factor if rescale_factor is not None else self.rescale_factor
        lowercase : Optional[int] =do_normalize if do_normalize is not None else self.do_normalize
        lowercase : List[Any] =image_mean if image_mean is not None else self.image_mean
        lowercase : Optional[int] =image_std if image_std is not None else self.image_std
        lowercase : Any =apply_ocr if apply_ocr is not None else self.apply_ocr
        lowercase : Any =ocr_lang if ocr_lang is not None else self.ocr_lang
        lowercase : Dict =tesseract_config if tesseract_config is not None else self.tesseract_config
        lowercase : str =make_list_of_images(UpperCAmelCase )
        if not valid_images(UpperCAmelCase ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        # Required-argument validation for each enabled transformation.
        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''' )
        # All transformations expect numpy arrays.
        lowercase : Tuple =[to_numpy_array(UpperCAmelCase ) for image in images]
        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self , '''pytesseract''' )
            lowercase : int =[]
            lowercase : Tuple =[]
            for image in images:
                lowercase , lowercase : Dict =apply_tesseract(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )
                words_batch.append(UpperCAmelCase )
                boxes_batch.append(UpperCAmelCase )
        if do_resize:
            lowercase : int =[self.resize(image=UpperCAmelCase , size=UpperCAmelCase , resample=UpperCAmelCase ) for image in images]
        if do_rescale:
            lowercase : Tuple =[self.rescale(image=UpperCAmelCase , scale=UpperCAmelCase ) for image in images]
        if do_normalize:
            lowercase : str =[self.normalize(image=UpperCAmelCase , mean=UpperCAmelCase , std=UpperCAmelCase ) for image in images]
        # Convert to the requested channel layout (channels-first by default).
        lowercase : Optional[Any] =[to_channel_dimension_format(UpperCAmelCase , UpperCAmelCase ) for image in images]
        lowercase : Dict =BatchFeature(data={'''pixel_values''': images} , tensor_type=UpperCAmelCase )
        # Attach OCR outputs alongside the pixel values when OCR ran.
        if apply_ocr:
            lowercase : int =words_batch
            lowercase : List[str] =boxes_batch
        return data
| 8 | 0 |
'''simple docstring'''
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : List[Any] , UpperCAmelCase : list ) -> None:
'''simple docstring'''
lowercase : Union[str, Any] =set_counts
lowercase : Any =max(UpperCAmelCase )
lowercase : Optional[Any] =len(UpperCAmelCase )
lowercase : Tuple =[1] * num_sets
lowercase : List[Any] =list(range(UpperCAmelCase ) )
def A__ ( self : Optional[int] , UpperCAmelCase : int , UpperCAmelCase : int ) -> bool:
'''simple docstring'''
lowercase : int =self.get_parent(UpperCAmelCase )
lowercase : int =self.get_parent(UpperCAmelCase )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
lowercase : str =0
lowercase : Dict =dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
lowercase : Optional[Any] =self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
lowercase : List[str] =0
lowercase : Tuple =src_parent
lowercase : int =self.set_counts[src_parent]
lowercase : Union[str, Any] =max(self.max_set , UpperCAmelCase )
return True
def A__ ( self : int , UpperCAmelCase : int ) -> int:
'''simple docstring'''
if self.parents[disj_set] == disj_set:
return disj_set
lowercase : Tuple =self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
| 718 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class UpperCAmelCase_ :
    """Model tester for the TF DistilBERT family: builds a small random
    config/input set and runs shape checks for each task head.

    NOTE(review): obfuscation collapsed all parameter names to
    ``UpperCAmelCase`` (duplicate argument names are a SyntaxError) and all
    local targets to ``lowercase``, so this block is not runnable as-is;
    comments describe the apparent intent only.
    """

    def __init__( self : Tuple , UpperCAmelCase : List[str] , ) -> Union[str, Any]:
        '''Store the parent test case and the small hyper-parameters used to
        build throwaway DistilBERT models.'''
        lowercase : int =parent
        lowercase : Any =13
        lowercase : Any =7
        lowercase : Optional[int] =True
        lowercase : Optional[int] =True
        lowercase : Tuple =False
        lowercase : Optional[Any] =True
        lowercase : Dict =99
        lowercase : Union[str, Any] =32
        lowercase : Union[str, Any] =2
        lowercase : Union[str, Any] =4
        lowercase : List[str] =37
        lowercase : str ='''gelu'''
        lowercase : Dict =0.1
        lowercase : List[Any] =0.1
        lowercase : List[str] =512
        lowercase : Optional[int] =16
        lowercase : Optional[Any] =2
        lowercase : List[str] =0.0_2
        lowercase : Any =3
        lowercase : Optional[Any] =4
        lowercase : int =None

    def A__ ( self : List[str] ) -> Dict:
        '''Build random ids/masks/labels and a matching DistilBertConfig.'''
        lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase : Any =None
        if self.use_input_mask:
            lowercase : List[str] =random_attention_mask([self.batch_size, self.seq_length] )
        lowercase : Union[str, Any] =None
        lowercase : Any =None
        lowercase : str =None
        if self.use_labels:
            lowercase : Union[str, Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
            lowercase : Tuple =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            lowercase : List[Any] =ids_tensor([self.batch_size] , self.num_choices )
        lowercase : Dict =DistilBertConfig(
            vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def A__ ( self : Any , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] ) -> Union[str, Any]:
        '''Check the base model output shape for dict and list call styles.'''
        lowercase : int =TFDistilBertModel(config=UpperCAmelCase )
        lowercase : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
        lowercase : List[str] =model(UpperCAmelCase )
        lowercase : str =[input_ids, input_mask]
        lowercase : Tuple =model(UpperCAmelCase )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def A__ ( self : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] ) -> Tuple:
        '''Check logits shape of the masked-LM head.'''
        lowercase : List[str] =TFDistilBertForMaskedLM(config=UpperCAmelCase )
        lowercase : int ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
        lowercase : Union[str, Any] =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def A__ ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : Optional[int] ) -> Any:
        '''Check start/end logits shapes of the question-answering head.'''
        lowercase : str =TFDistilBertForQuestionAnswering(config=UpperCAmelCase )
        lowercase : int ={
            '''input_ids''': input_ids,
            '''attention_mask''': input_mask,
        }
        lowercase : List[str] =model(UpperCAmelCase )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def A__ ( self : str , UpperCAmelCase : int , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict ) -> Optional[int]:
        '''Check logits shape of the sequence-classification head.'''
        lowercase : Dict =self.num_labels
        lowercase : Optional[Any] =TFDistilBertForSequenceClassification(UpperCAmelCase )
        lowercase : str ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
        lowercase : Union[str, Any] =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def A__ ( self : int , UpperCAmelCase : str , UpperCAmelCase : str , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : int , UpperCAmelCase : Dict ) -> List[str]:
        '''Check logits shape of the multiple-choice head; inputs are tiled
        across the choices dimension.'''
        lowercase : List[Any] =self.num_choices
        lowercase : Optional[int] =TFDistilBertForMultipleChoice(UpperCAmelCase )
        lowercase : Optional[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
        lowercase : Tuple ={
            '''input_ids''': multiple_choice_inputs_ids,
            '''attention_mask''': multiple_choice_input_mask,
        }
        lowercase : Tuple =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def A__ ( self : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Union[str, Any] ) -> Dict:
        '''Check logits shape of the token-classification head.'''
        lowercase : Dict =self.num_labels
        lowercase : Tuple =TFDistilBertForTokenClassification(UpperCAmelCase )
        lowercase : Optional[Any] ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
        lowercase : str =model(UpperCAmelCase )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def A__ ( self : List[str] ) -> Dict:
        '''Return (config, inputs_dict) for the common model test mixin.'''
        lowercase : int =self.prepare_config_and_inputs()
        ((lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase)) : Union[str, Any] =config_and_inputs
        lowercase : Tuple ={'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( __A , __A , unittest.TestCase ):
    """Common-mixin test suite for the TF DistilBERT model family: wires the
    model tester above into per-head shape tests plus a slow from_pretrained
    smoke test."""

    # All TF DistilBERT architectures under test (None when TF is missing).
    UpperCamelCase_ = (
        (
            TFDistilBertModel,
            TFDistilBertForMaskedLM,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertForMultipleChoice,
        )
        if is_tf_available()
        else None
    )
    # Pipeline-task to model-class mapping for the pipeline test mixin.
    UpperCamelCase_ = (
        {
            '''feature-extraction''': TFDistilBertModel,
            '''fill-mask''': TFDistilBertForMaskedLM,
            '''question-answering''': TFDistilBertForQuestionAnswering,
            '''text-classification''': TFDistilBertForSequenceClassification,
            '''token-classification''': TFDistilBertForTokenClassification,
            '''zero-shot''': TFDistilBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    UpperCamelCase_ = False
    UpperCamelCase_ = False

    def A__ ( self : Dict ) -> str:
        '''Create the model tester and a ConfigTester for DistilBertConfig.'''
        lowercase : str =TFDistilBertModelTester(self )
        lowercase : int =ConfigTester(self , config_class=UpperCAmelCase , dim=37 )

    def A__ ( self : Union[str, Any] ) -> Dict:
        '''Run the shared configuration sanity tests.'''
        self.config_tester.run_common_tests()

    def A__ ( self : Optional[Any] ) -> Tuple:
        '''Shape test for the base model.'''
        lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_model(*UpperCAmelCase )

    def A__ ( self : Tuple ) -> Any:
        '''Shape test for the masked-LM head.'''
        lowercase : str =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_masked_lm(*UpperCAmelCase )

    def A__ ( self : Optional[int] ) -> List[Any]:
        '''Shape test for the question-answering head.'''
        lowercase : Any =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_question_answering(*UpperCAmelCase )

    def A__ ( self : Any ) -> str:
        '''Shape test for the sequence-classification head.'''
        lowercase : List[Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_sequence_classification(*UpperCAmelCase )

    def A__ ( self : Optional[Any] ) -> List[str]:
        '''Shape test for the multiple-choice head.'''
        lowercase : str =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_multiple_choice(*UpperCAmelCase )

    def A__ ( self : str ) -> Union[str, Any]:
        '''Shape test for the token-classification head.'''
        lowercase : str =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_distilbert_for_token_classification(*UpperCAmelCase )

    @slow
    def A__ ( self : List[Any] ) -> Dict:
        '''Smoke test: load the first pretrained checkpoint from the archive list.'''
        for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
            lowercase : Union[str, Any] =TFDistilBertModel.from_pretrained(UpperCAmelCase )
            self.assertIsNotNone(UpperCAmelCase )
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
    """Integration test: compares a slice of distilbert-base-uncased outputs
    against pinned reference values.

    NOTE(review): locals were obfuscated to ``lowercase``, so later
    references (``model``, ``output``, ``UpperCAmelCase``) are unresolved
    in this block as written.
    """

    @slow
    def A__ ( self : List[str] ) -> List[Any]:
        '''Run the pretrained model on a fixed 6-token input; check the output
        shape and the first 3x3 block of the last hidden state.'''
        lowercase : Optional[Any] =TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
        lowercase : Tuple =tf.constant([[0, 1, 2, 3, 4, 5]] )
        lowercase : List[Any] =model(UpperCAmelCase )[0]
        # Expected shape: (batch=1, seq_len=6, hidden=768).
        lowercase : str =[1, 6, 768]
        self.assertEqual(output.shape , UpperCAmelCase )
        # Pinned reference values for the first 3 tokens x 3 hidden dims.
        lowercase : Optional[int] =tf.constant(
            [
                [
                    [0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
                    [0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
                    [0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 )
| 8 | 0 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# The slow CamembertTokenizer needs sentencepiece; fall back to None so the
# fast tokenizer can still be used without it.
if is_sentencepiece_available():
    from .tokenization_camembert import CamembertTokenizer
else:
    SCREAMING_SNAKE_CASE = None
# NOTE(review): obfuscation bound the logger, the vocab-file names map, the
# pretrained maps, the max-size map and the sentinel above all to the same
# name SCREAMING_SNAKE_CASE, so each assignment clobbers the previous one.
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE = {
    'vocab_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model',
    },
    'tokenizer_file': {
        'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/tokenizer.json',
    },
}
# Maximum model input size per pretrained checkpoint.
SCREAMING_SNAKE_CASE = {
    'camembert-base': 512,
}
# SentencePiece sub-word prefix marker.
SCREAMING_SNAKE_CASE = '▁'
class UpperCAmelCase_ ( __A ):
    """Fast (tokenizers-backed) CamemBERT tokenizer.

    NOTE(review): obfuscation collapsed all three methods to the name ``A__``
    (later definitions shadow earlier ones), all class attributes to
    ``UpperCamelCase_`` and all ``__init__`` parameters to ``UpperCAmelCase``
    (duplicate argument names are a SyntaxError), so this block is not
    runnable as-is; comments describe the apparent intent only.
    """

    UpperCamelCase_ = VOCAB_FILES_NAMES
    UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
    UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCamelCase_ = ['''input_ids''', '''attention_mask''']
    # Slow tokenizer class used when converting/saving the sentencepiece vocab.
    UpperCamelCase_ = CamembertTokenizer

    def __init__( self : Union[str, Any] , UpperCAmelCase : Optional[Any]=None , UpperCAmelCase : str=None , UpperCAmelCase : Union[str, Any]="<s>" , UpperCAmelCase : Tuple="</s>" , UpperCAmelCase : Dict="</s>" , UpperCAmelCase : Tuple="<s>" , UpperCAmelCase : Union[str, Any]="<unk>" , UpperCAmelCase : Union[str, Any]="<pad>" , UpperCAmelCase : List[Any]="<mask>" , UpperCAmelCase : Union[str, Any]=["<s>NOTUSED", "</s>NOTUSED"] , **UpperCAmelCase : Any , ) -> Optional[Any]:
        '''Initialise the fast tokenizer; the mask token strips a leading
        space on the left so it behaves like a normal word.'''
        lowercase : Any =AddedToken(UpperCAmelCase , lstrip=UpperCAmelCase , rstrip=UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ) else mask_token
        super().__init__(
            UpperCAmelCase , tokenizer_file=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , sep_token=UpperCAmelCase , cls_token=UpperCAmelCase , unk_token=UpperCAmelCase , pad_token=UpperCAmelCase , mask_token=UpperCAmelCase , additional_special_tokens=UpperCAmelCase , **UpperCAmelCase , )
        lowercase : Optional[int] =vocab_file
        # A slow tokenizer can only be saved if the sentencepiece file exists.
        lowercase : Any =False if not self.vocab_file else True

    def A__ ( self : List[Any] , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
        '''Build model inputs with special tokens:
        single: <s> X </s>; pair: <s> A </s></s> B </s>.'''
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        lowercase : Optional[int] =[self.cls_token_id]
        lowercase : Dict =[self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def A__ ( self : int , UpperCAmelCase : List[int] , UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
        '''Return token type ids — all zeros, as CamemBERT (like RoBERTa)
        does not use segment ids.'''
        lowercase : Tuple =[self.sep_token_id]
        lowercase : Optional[Any] =[self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    def A__ ( self : Dict , UpperCAmelCase : str , UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
        '''Copy the sentencepiece model into save_directory and return its path.'''
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(UpperCAmelCase ):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
            return
        lowercase : Optional[Any] =os.path.join(
            UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        # Only copy when source and destination differ.
        if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase ):
            copyfile(self.vocab_file , UpperCAmelCase )
        return (out_vocab_file,)
| 719 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy import structure for the RemBERT model family: heavy submodules are
# only imported on attribute access (or eagerly under TYPE_CHECKING).
# NOTE(review): obfuscation renamed the import-structure dict and every
# per-backend list assignment to the same name SCREAMING_SNAKE_CASE (the
# originals were `_import_structure` and its keyed entries), so as written
# each assignment clobbers the previous one and the `_import_structure`
# referenced at the bottom is undefined.
SCREAMING_SNAKE_CASE = {
    'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig']
}

# Slow tokenizer requires sentencepiece.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE = ['RemBertTokenizer']

# Fast tokenizer requires the tokenizers library.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE = ['RemBertTokenizerFast']

# PyTorch model classes.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE = [
        'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'RemBertForCausalLM',
        'RemBertForMaskedLM',
        'RemBertForMultipleChoice',
        'RemBertForQuestionAnswering',
        'RemBertForSequenceClassification',
        'RemBertForTokenClassification',
        'RemBertLayer',
        'RemBertModel',
        'RemBertPreTrainedModel',
        'load_tf_weights_in_rembert',
    ]

# TensorFlow model classes.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    SCREAMING_SNAKE_CASE = [
        'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFRemBertForCausalLM',
        'TFRemBertForMaskedLM',
        'TFRemBertForMultipleChoice',
        'TFRemBertForQuestionAnswering',
        'TFRemBertForSequenceClassification',
        'TFRemBertForTokenClassification',
        'TFRemBertLayer',
        'TFRemBertModel',
        'TFRemBertPreTrainedModel',
    ]

# Under static type checking, import everything eagerly so type checkers see
# the real symbols; at runtime, replace the module with a _LazyModule.
if TYPE_CHECKING:
    from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert import RemBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_rembert_fast import RemBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_rembert import (
            REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            RemBertForCausalLM,
            RemBertForMaskedLM,
            RemBertForMultipleChoice,
            RemBertForQuestionAnswering,
            RemBertForSequenceClassification,
            RemBertForTokenClassification,
            RemBertLayer,
            RemBertModel,
            RemBertPreTrainedModel,
            load_tf_weights_in_rembert,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_rembert import (
            TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFRemBertForCausalLM,
            TFRemBertForMaskedLM,
            TFRemBertForMultipleChoice,
            TFRemBertForQuestionAnswering,
            TFRemBertForSequenceClassification,
            TFRemBertForTokenClassification,
            TFRemBertLayer,
            TFRemBertModel,
            TFRemBertPreTrainedModel,
        )

else:
    import sys

    SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 8 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
'SCUT-DLVCLab/lilt-roberta-en-base': (
'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
),
}
class lowercase__ ( __A ):
    """Configuration for a LiLT (Language-independent Layout Transformer) model.

    Fixes to the original: the ``__init__`` signature declared the same
    obfuscated name for all 17 parameters (a SyntaxError) and the body
    assigned dead locals instead of attributes. Parameter names are
    reconstructed from the attribute assignments (which survived obfuscation)
    and the positional defaults; the ``max_ad_position_embeddings`` spelling
    from the original body is kept for interface compatibility, though it is
    presumably ``max_2d_position_embeddings`` upstream — confirm.
    """

    UpperCamelCase_ = '''lilt'''

    def __init__( self : Tuple , vocab_size : int = 3_0522 , hidden_size : int = 768 , num_hidden_layers : int = 12 , num_attention_heads : int = 12 , intermediate_size : int = 3072 , hidden_act : str = "gelu" , hidden_dropout_prob : float = 0.1 , attention_probs_dropout_prob : float = 0.1 , max_position_embeddings : int = 512 , type_vocab_size : int = 2 , initializer_range : float = 0.0_2 , layer_norm_eps : float = 1e-12 , pad_token_id : int = 0 , position_embedding_type : str = "absolute" , classifier_dropout : Optional[float] = None , channel_shrink_ratio : int = 4 , max_ad_position_embeddings : int = 1024 , **kwargs : Optional[int] , ) -> None:
        '''Store the transformer hyper-parameters; everything not consumed
        here is forwarded to the PretrainedConfig base class.'''
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        # LiLT-specific: layout-channel shrink factor and 2D position table size.
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_ad_position_embeddings = max_ad_position_embeddings
| 720 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
SCREAMING_SNAKE_CASE = parse(importlib.metadata.version('torch'))
def lowercase_ ( library_or_version : Union[str, Version] , operation : str , requirement_version : str ) -> Union[str, Any]:
    """Compare a library's installed version (or an already-parsed Version)
    against ``requirement_version`` using a string operation ("==", ">=", ...).

    Fixes: the original declared the same placeholder name ``__A`` for all
    three parameters (a SyntaxError), and the ``isinstance(__A, __A)`` check
    was corrupted — it is restored to "string means a library name whose
    installed version must be looked up".

    Raises:
        ValueError: if ``operation`` is not a key of STR_OPERATION_TO_FUNC.
    """
    if operation not in STR_OPERATION_TO_FUNC.keys():
        raise ValueError(f'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}' )
    operation_func = STR_OPERATION_TO_FUNC[operation]
    # A string argument is a library name: resolve its installed version.
    if isinstance(library_or_version , str ):
        library_or_version = parse(importlib.metadata.version(library_or_version ) )
    return operation_func(library_or_version , parse(requirement_version ) )
def lowercase_ ( operation : str , version : str ) -> bool:
    """Compare the installed torch version against ``version`` with the given
    string operation ("==", ">=", ...).

    Fixes: the original declared the same placeholder name for both
    parameters (a SyntaxError), passed three identical placeholders onward,
    and its ``-> Tuple`` annotation referenced an unimported name; the
    comparison returns a bool. SCREAMING_SNAKE_CASE is the module-level
    parsed torch version.

    NOTE(review): `compare_versions` is the module-scope comparison helper
    defined above under an obfuscated name — confirm the binding.
    """
    return compare_versions(SCREAMING_SNAKE_CASE , operation , version )
| 8 | 0 |
'''simple docstring'''
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class UpperCAmelCase_ ( __A ):
    """Mixin of shared feature-extractor tests (attributes, batching, padding, truncation,
    attention masks, and np/pt/tf tensor equivalence).

    NOTE(review): identifiers in this file look machine-mangled — the base class `__A`,
    the repeated `UpperCamelCase_` class attributes, the many methods all named `A__`,
    the undefined `UpperCAmelCase` argument used throughout, and locals collapsed onto
    `lowercase`/`input_a` (which visibly stood for several distinct variables such as
    `input_1..input_4`). The code is documented as-is; confirm against the upstream
    `test_sequence_feature_extraction_common.py` before relying on behavior.
    """

    # NOTE(review): both class attributes share one name, so the second assignment
    # shadows the first; presumably these were `feature_extraction_class` and
    # `feat_extract_tester` — TODO confirm.
    UpperCamelCase_ = None
    UpperCamelCase_ = None
    @property
    def A__ ( self : Tuple ) -> Dict:
        '''Return the feature-extractor init kwargs prepared by the tester.'''
        return self.feat_extract_tester.prepare_feat_extract_dict()
    def A__ ( self : Dict ) -> List[str]:
        '''Instantiate the feature extractor and assert its common attributes exist.'''
        lowercase : Optional[int] =self.feature_extraction_class(**self.feat_extract_dict )
        self.assertTrue(hasattr(UpperCAmelCase , '''feature_size''' ) )
        self.assertTrue(hasattr(UpperCAmelCase , '''sampling_rate''' ) )
        self.assertTrue(hasattr(UpperCAmelCase , '''padding_value''' ) )
    def A__ ( self : List[str] ) -> Dict:
        '''Check BatchFeature keeps per-example lengths and batches equal-length inputs to a numpy array.'''
        lowercase : int =self.feat_extract_tester.prepare_inputs_for_common()
        lowercase : List[Any] =self.feature_extraction_class(**self.feat_extract_dict )
        lowercase : Dict =feat_extract.model_input_names[0]
        lowercase : Union[str, Any] =BatchFeature({input_name: speech_inputs} )
        self.assertTrue(all(len(UpperCAmelCase ) == len(UpperCAmelCase ) for x, y in zip(UpperCAmelCase , processed_features[input_name] ) ) )
        lowercase : Optional[Any] =self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCAmelCase )
        lowercase : List[Any] =BatchFeature({input_name: speech_inputs} , tensor_type='''np''' )
        lowercase : int =processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            # Promote 2D (batch, seq) output to 3D so the shape check below is uniform.
            lowercase : Optional[Any] =batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
    @require_torch
    def A__ ( self : str ) -> Optional[int]:
        '''Same batched-shape check as above, but with PyTorch tensors.'''
        lowercase : Optional[int] =self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCAmelCase )
        lowercase : Dict =self.feature_extraction_class(**self.feat_extract_dict )
        lowercase : Union[str, Any] =feat_extract.model_input_names[0]
        lowercase : List[str] =BatchFeature({input_name: speech_inputs} , tensor_type='''pt''' )
        lowercase : Tuple =processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            lowercase : Tuple =batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
    @require_tf
    def A__ ( self : List[str] ) -> Union[str, Any]:
        '''Same batched-shape check as above, but with TensorFlow tensors.'''
        lowercase : int =self.feat_extract_tester.prepare_inputs_for_common(equal_length=UpperCAmelCase )
        lowercase : List[Any] =self.feature_extraction_class(**self.feat_extract_dict )
        lowercase : Tuple =feat_extract.model_input_names[0]
        lowercase : str =BatchFeature({input_name: speech_inputs} , tensor_type='''tf''' )
        lowercase : Dict =processed_features[input_name]
        if len(batch_features_input.shape ) < 3:
            lowercase : List[str] =batch_features_input[:, :, None]
        self.assertTrue(
            batch_features_input.shape
            == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
    def A__ ( self : int , UpperCAmelCase : List[Any]=False ) -> str:
        '''Shared padding checks ("longest"/"max_length"/pad_to_multiple_of) for list and numpy inputs.'''
        def _inputs_have_equal_length(UpperCAmelCase : List[str] ):
            # True when every slice has the same length as the first one.
            lowercase : Tuple =len(input[0] )
            for input_slice in input[1:]:
                if len(UpperCAmelCase ) != length:
                    return False
            return True
        def _inputs_are_equal(UpperCAmelCase : List[str] , UpperCAmelCase : Any ):
            # Elementwise numeric comparison of two batches with a small tolerance.
            if len(UpperCAmelCase ) != len(UpperCAmelCase ):
                return False
            for input_slice_a, input_slice_a in zip(UpperCAmelCase , UpperCAmelCase ):
                if not np.allclose(np.asarray(UpperCAmelCase ) , np.asarray(UpperCAmelCase ) , atol=1e-3 ):
                    return False
            return True
        lowercase : str =self.feature_extraction_class(**self.feat_extract_dict )
        lowercase : Optional[int] =self.feat_extract_tester.prepare_inputs_for_common(numpify=UpperCAmelCase )
        lowercase : List[str] =feat_extract.model_input_names[0]
        lowercase : Any =BatchFeature({input_name: speech_inputs} )
        lowercase : List[str] =self.feat_extract_tester.seq_length_diff
        lowercase : Union[str, Any] =self.feat_extract_tester.max_seq_length + pad_diff
        lowercase : Union[str, Any] =self.feat_extract_tester.min_seq_length
        lowercase : Tuple =self.feat_extract_tester.batch_size
        lowercase : List[str] =self.feat_extract_tester.feature_size
        # test padding for List[int] + numpy
        # NOTE(review): `input_a` below is reassigned repeatedly — it presumably stood
        # for distinct variables (input_1..input_5) before mangling; the assertions
        # comparing "them" are therefore suspect — TODO confirm upstream.
        lowercase : Union[str, Any] =feat_extract.pad(UpperCAmelCase , padding=UpperCAmelCase )
        lowercase : str =input_a[input_name]
        lowercase : List[str] =feat_extract.pad(UpperCAmelCase , padding='''longest''' )
        lowercase : Any =input_a[input_name]
        lowercase : Tuple =feat_extract.pad(UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[-1] ) )
        lowercase : Union[str, Any] =input_a[input_name]
        lowercase : Tuple =feat_extract.pad(UpperCAmelCase , padding='''longest''' , return_tensors='''np''' )
        lowercase : str =input_a[input_name]
        # max_length parameter has to be provided when setting `padding="max_length"`
        with self.assertRaises(UpperCAmelCase ):
            feat_extract.pad(UpperCAmelCase , padding='''max_length''' )[input_name]
        lowercase : List[Any] =feat_extract.pad(
            UpperCAmelCase , padding='''max_length''' , max_length=UpperCAmelCase , return_tensors='''np''' )
        lowercase : Union[str, Any] =input_a[input_name]
        self.assertFalse(_inputs_have_equal_length(UpperCAmelCase ) )
        self.assertTrue(_inputs_have_equal_length(UpperCAmelCase ) )
        self.assertTrue(_inputs_have_equal_length(UpperCAmelCase ) )
        self.assertTrue(_inputs_are_equal(UpperCAmelCase , UpperCAmelCase ) )
        self.assertTrue(len(input_a[0] ) == pad_min_length )
        self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
        self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
        self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
        if feature_size > 1:
            self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
        # test padding for `pad_to_multiple_of` for List[int] + numpy
        lowercase : str =feat_extract.pad(UpperCAmelCase , pad_to_multiple_of=10 )
        lowercase : int =input_a[input_name]
        lowercase : Any =feat_extract.pad(UpperCAmelCase , padding='''longest''' , pad_to_multiple_of=10 )
        lowercase : List[Any] =input_a[input_name]
        lowercase : Union[str, Any] =feat_extract.pad(
            UpperCAmelCase , padding='''max_length''' , pad_to_multiple_of=10 , max_length=UpperCAmelCase )
        lowercase : int =input_a[input_name]
        lowercase : Dict =feat_extract.pad(
            UpperCAmelCase , padding='''max_length''' , pad_to_multiple_of=10 , max_length=UpperCAmelCase , return_tensors='''np''' , )
        lowercase : Any =input_a[input_name]
        self.assertTrue(all(len(UpperCAmelCase ) % 10 == 0 for x in input_a ) )
        self.assertTrue(_inputs_are_equal(UpperCAmelCase , UpperCAmelCase ) )
        # Round the expected length up to the next multiple of 10.
        lowercase : Union[str, Any] =pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
        self.assertTrue(all(len(UpperCAmelCase ) == expected_mult_pad_length for x in input_a ) )
        self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
        if feature_size > 1:
            self.assertTrue(input_a.shape[2] == feature_size )
        # Check padding value is correct
        lowercase : Union[str, Any] =(np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
        self.assertTrue(
            abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
            < 1e-3 )
        self.assertTrue(
            abs(
                np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
            < 1e-3 )
        self.assertTrue(
            abs(
                np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
                - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
            < 1e-3 )
        self.assertTrue(
            abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
        self.assertTrue(
            abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
            < 1e-3 )
    def A__ ( self : Optional[int] , UpperCAmelCase : Optional[int]=False ) -> Optional[Any]:
        '''Shared truncation checks (truncation flag, pad_to_multiple_of, error cases) for list and numpy inputs.'''
        def _inputs_have_equal_length(UpperCAmelCase : int ):
            # True when every slice has the same length as the first one.
            lowercase : List[Any] =len(input[0] )
            for input_slice in input[1:]:
                if len(UpperCAmelCase ) != length:
                    return False
            return True
        def _inputs_are_equal(UpperCAmelCase : Dict , UpperCAmelCase : Any ):
            # Elementwise numeric comparison of two batches with a small tolerance.
            if len(UpperCAmelCase ) != len(UpperCAmelCase ):
                return False
            for input_slice_a, input_slice_a in zip(UpperCAmelCase , UpperCAmelCase ):
                if not np.allclose(np.asarray(UpperCAmelCase ) , np.asarray(UpperCAmelCase ) , atol=1e-3 ):
                    return False
            return True
        lowercase : Dict =self.feature_extraction_class(**self.feat_extract_dict )
        lowercase : Union[str, Any] =self.feat_extract_tester.prepare_inputs_for_common(numpify=UpperCAmelCase )
        lowercase : List[str] =feat_extract.model_input_names[0]
        lowercase : Union[str, Any] =BatchFeature({input_name: speech_inputs} )
        # truncate to smallest
        lowercase : Union[str, Any] =feat_extract.pad(
            UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , truncation=UpperCAmelCase )
        lowercase : List[str] =input_a[input_name]
        lowercase : str =feat_extract.pad(UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) )
        lowercase : Tuple =input_a[input_name]
        self.assertTrue(_inputs_have_equal_length(UpperCAmelCase ) )
        self.assertFalse(_inputs_have_equal_length(UpperCAmelCase ) )
        # truncate to smallest with np
        lowercase : Any =feat_extract.pad(
            UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' , truncation=UpperCAmelCase , )
        lowercase : Union[str, Any] =input_a[input_name]
        lowercase : List[Any] =feat_extract.pad(
            UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , return_tensors='''np''' )
        lowercase : int =input_a[input_name]
        self.assertTrue(_inputs_have_equal_length(UpperCAmelCase ) )
        self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(UpperCAmelCase ) )
        # truncate to middle
        lowercase : Optional[Any] =feat_extract.pad(
            UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=UpperCAmelCase , return_tensors='''np''' , )
        lowercase : Union[str, Any] =input_a[input_name]
        lowercase : int =feat_extract.pad(
            UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[1] ) , truncation=UpperCAmelCase )
        lowercase : str =input_a[input_name]
        lowercase : str =feat_extract.pad(
            UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[1] ) , return_tensors='''np''' )
        lowercase : Tuple =input_a[input_name]
        self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
        self.assertTrue(_inputs_have_equal_length(UpperCAmelCase ) )
        self.assertTrue(_inputs_have_equal_length(UpperCAmelCase ) )
        self.assertTrue(_inputs_are_equal(UpperCAmelCase , UpperCAmelCase ) )
        # since truncation forces padding to be smaller than longest input
        # function can't return `np.ndarray`, but has to return list
        self.assertFalse(_inputs_have_equal_length(UpperCAmelCase ) )
        self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(UpperCAmelCase ):
            feat_extract.pad(UpperCAmelCase , truncation=UpperCAmelCase )[input_name]
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(UpperCAmelCase ):
            feat_extract.pad(UpperCAmelCase , padding='''longest''' , truncation=UpperCAmelCase )[input_name]
        # padding has to be max_length when setting `truncation=True`
        with self.assertRaises(UpperCAmelCase ):
            feat_extract.pad(UpperCAmelCase , padding='''longest''' , truncation=UpperCAmelCase )[input_name]
        # max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
        with self.assertRaises(UpperCAmelCase ):
            feat_extract.pad(UpperCAmelCase , padding='''max_length''' , truncation=UpperCAmelCase )[input_name]
        # test truncation for `pad_to_multiple_of` for List[int] + numpy
        lowercase : int =12
        lowercase : List[str] =feat_extract.pad(
            UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=UpperCAmelCase , truncation=UpperCAmelCase , )
        lowercase : int =input_a[input_name]
        lowercase : Tuple =feat_extract.pad(
            UpperCAmelCase , padding='''max_length''' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=UpperCAmelCase , )
        lowercase : Dict =input_a[input_name]
        # retrieve expected_length as multiple of pad_to_multiple_of
        lowercase : Dict =len(speech_inputs[0] )
        if expected_length % pad_to_multiple_of != 0:
            lowercase : List[Any] =((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
        self.assertTrue(len(input_a[0] ) == expected_length )
        self.assertTrue(_inputs_have_equal_length(UpperCAmelCase ) )
        self.assertFalse(_inputs_have_equal_length(UpperCAmelCase ) )
    def A__ ( self : str ) -> int:
        '''Run the shared padding checks on list inputs.'''
        self._check_padding(numpify=UpperCAmelCase )
    def A__ ( self : Tuple ) -> Dict:
        '''Run the shared padding checks on numpy inputs.'''
        self._check_padding(numpify=UpperCAmelCase )
    def A__ ( self : Any ) -> Any:
        '''Run the shared truncation checks on list inputs.'''
        self._check_truncation(numpify=UpperCAmelCase )
    def A__ ( self : Any ) -> Any:
        '''Run the shared truncation checks on numpy inputs.'''
        self._check_truncation(numpify=UpperCAmelCase )
    @require_torch
    def A__ ( self : Any ) -> int:
        '''Padded numpy output and padded PyTorch output must agree numerically.'''
        lowercase : Optional[Any] =self.feature_extraction_class(**self.feat_extract_dict )
        lowercase : int =self.feat_extract_tester.prepare_inputs_for_common()
        lowercase : str =feat_extract.model_input_names[0]
        lowercase : Optional[int] =BatchFeature({input_name: speech_inputs} )
        lowercase : Optional[Any] =feat_extract.pad(UpperCAmelCase , padding='''longest''' , return_tensors='''np''' )[input_name]
        lowercase : List[str] =feat_extract.pad(UpperCAmelCase , padding='''longest''' , return_tensors='''pt''' )[input_name]
        self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
    @require_tf
    def A__ ( self : int ) -> List[Any]:
        '''Padded numpy output and padded TensorFlow output must agree numerically.'''
        lowercase : Union[str, Any] =self.feature_extraction_class(**self.feat_extract_dict )
        lowercase : Tuple =self.feat_extract_tester.prepare_inputs_for_common()
        lowercase : Optional[Any] =feat_extract.model_input_names[0]
        lowercase : int =BatchFeature({input_name: speech_inputs} )
        lowercase : str =feat_extract.pad(UpperCAmelCase , padding='''longest''' , return_tensors='''np''' )[input_name]
        lowercase : List[str] =feat_extract.pad(UpperCAmelCase , padding='''longest''' , return_tensors='''tf''' )[input_name]
        self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
    def A__ ( self : str ) -> List[Any]:
        '''Padding with return_attention_mask must yield a mask whose row sums are the input lengths.'''
        lowercase : str =self.feat_extract_dict
        lowercase : Union[str, Any] =True
        lowercase : List[str] =self.feature_extraction_class(**UpperCAmelCase )
        lowercase : Optional[int] =self.feat_extract_tester.prepare_inputs_for_common()
        lowercase : int =[len(UpperCAmelCase ) for x in speech_inputs]
        lowercase : Tuple =feat_extract.model_input_names[0]
        lowercase : Dict =BatchFeature({input_name: speech_inputs} )
        lowercase : str =feat_extract.pad(UpperCAmelCase , padding='''longest''' , return_tensors='''np''' )
        self.assertIn('''attention_mask''' , UpperCAmelCase )
        self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
        self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , UpperCAmelCase )
    def A__ ( self : Optional[int] ) -> Optional[Any]:
        '''Truncating to the shortest input must yield an attention mask of all-max_length rows.'''
        lowercase : Dict =self.feat_extract_dict
        lowercase : List[str] =True
        lowercase : Optional[Any] =self.feature_extraction_class(**UpperCAmelCase )
        lowercase : Any =self.feat_extract_tester.prepare_inputs_for_common()
        lowercase : Optional[Any] =[len(UpperCAmelCase ) for x in speech_inputs]
        lowercase : Tuple =feat_extract.model_input_names[0]
        lowercase : Optional[Any] =BatchFeature({input_name: speech_inputs} )
        lowercase : List[str] =min(UpperCAmelCase )
        lowercase : Any =feat_extract.pad(
            UpperCAmelCase , padding='''max_length''' , max_length=UpperCAmelCase , truncation=UpperCAmelCase , return_tensors='''np''' )
        self.assertIn('''attention_mask''' , UpperCAmelCase )
        self.assertListEqual(
            list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
        self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 721 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
SCREAMING_SNAKE_CASE = 'https://www.indeed.co.in/jobs?q=mobile+app+development&l='
def lowercase_ ( location : str = "mumbai" ) -> Generator[tuple[str, str], None, None]:
    """Scrape Indeed for mobile-app-development jobs in ``location``.

    Yields:
        ``(job_title, company_name)`` tuples, one per listing on the first page.

    Note: performs a live HTTP request; results depend on the remote site.
    """
    # NOTE: the original body referenced undefined names `url` and `location`;
    # the base URL is the module-level constant and the parameter is restored.
    lowercase : List[Any] =BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE + location ).content , '''html.parser''' )
    # This attribute finds out all the specifics listed in a job
    for job in lowercase.find_all('''div''' , attrs={'''data-tn-component''': '''organicJob'''} ):
        job_title =job.find('''a''' , attrs={'''data-tn-element''': '''jobTitle'''} ).text.strip()
        company_name =job.find('''span''' , {'''class''': '''company'''} ).text.strip()
        yield job_title, company_name
if __name__ == "__main__":
    # The scraper above is (mangled to) `lowercase_`; the original `fetch_jobs`
    # name does not exist in this module, so call the actual definition.
    for i, job in enumerate(lowercase_('Bangalore'), 1):
        print(f"""Job {i:>2} is {job[0]} at {job[1]}""")
| 8 | 0 |
'''simple docstring'''
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase_ ( ProcessorMixin ):
    """Processor wrapping an auto image processor and an auto tokenizer into one object.

    Call it with ``text`` and/or ``images``; text goes through the tokenizer,
    images through the image processor, and the two are merged when both are given.
    """

    # Names ProcessorMixin uses to wire the two sub-processors onto `self`.
    # (The original assigned all three to the same `UpperCamelCase_` name, so
    # only the last survived — restored to the attribute names the mixin expects.)
    attributes = ['''image_processor''', '''tokenizer''']
    image_processor_class = '''AutoImageProcessor'''
    tokenizer_class = '''AutoTokenizer'''

    def __init__( self , image_processor , tokenizer ):
        """Store both components via ProcessorMixin and default to the image processor."""
        super().__init__(image_processor , tokenizer )
        self.current_processor =self.image_processor

    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        """Tokenize ``text`` and/or preprocess ``images``.

        Returns:
            The tokenizer encoding (with ``pixel_values`` merged in when images
            are also given), or a ``BatchEncoding`` of image features alone.

        Raises:
            ValueError: if neither ``text`` nor ``images`` is provided.
        """
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''' )

        if text is not None:
            encoding =self.tokenizer(text , return_tensors=return_tensors , **kwargs )

        if images is not None:
            image_features =self.image_processor(images , return_tensors=return_tensors , **kwargs )

        if text is not None and images is not None:
            # Merge the vision features into the text encoding.
            encoding['''pixel_values'''] =image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )

    def batch_decode( self , *args , **kwargs ):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        """Names of the model inputs this processor produces."""
        return ["input_ids", "attention_mask", "pixel_values"]
| 700 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
# Lazy-import structure for the M2M100 model. The original assigned both the
# structure dict and the torch symbol list to the same name and then referenced
# an undefined `_import_structure`; the structure is now built in one dict, and
# the TYPE_CHECKING imports are aligned with the declared module names.
_import_structure = {
    'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
    'tokenization_m2m_100': ['M2M100Tokenizer'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Model symbols are only exposed when torch is installed.
    _import_structure['modeling_m2m_100'] = [
        'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
        'M2M100ForConditionalGeneration',
        'M2M100Model',
        'M2M100PreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static imports for type checkers / IDEs; must mirror _import_structure.
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 8 | 0 |
'''simple docstring'''
import argparse
import os
import re
# Root of the package whose __init__.py files are sorted. The original assigned
# every constant below to one shadowed name (`SCREAMING_SNAKE_CASE`) while the
# functions referenced `_re_indent`, `PATH_TO_DIFFUSERS`, etc.; names restored.
PATH_TO_DIFFUSERS = 'src/diffusers'

# Pattern that looks at the indentation in a line.
_re_indent = re.compile(r'^(\s*)\S')
# Pattern that matches `"key":" and puts `key` in group 0.
_re_direct_key = re.compile(r'^\s*"([^"]+)":')
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
_re_indirect_key = re.compile(r'^\s*_import_structure\["([^"]+)"\]')
# Pattern that matches `"key",` and puts `key` in group 0.
_re_strip_line = re.compile(r'^\s*"([^"]+)",\s*$')
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
_re_bracket_content = re.compile(r'\[([^\]]+)\]')


def get_indent(line):
    """Return the leading whitespace of `line` (empty string for blank lines)."""
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    """Split `code` into blocks that sit at `indent_level`.

    When `start_prompt`/`end_prompt` are given, everything before the first line
    starting with `start_prompt` becomes one block, and splitting stops at the
    first line starting with `end_prompt` (the remainder becomes a final block).
    """
    index = 0
    lines = code.split('\n')
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ['\n'.join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the code).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            # A line back at `indent_level` closes the current block — unless the
            # previous line was more indented, in which case this line is the
            # closing delimiter of the block and belongs to it.
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append('\n'.join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append('\n'.join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append('\n'.join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append('\n'.join(lines[index:]))

    return blocks


def ignore_underscore(key):
    """Wrap a key function so sorting ignores casing and underscores."""

    def _inner(x):
        return key(x).lower().replace('_', '')

    return _inner


def sort_objects(objects, key=None):
    """Sort imported object names: constants first, then classes, then functions."""

    # If no key is provided, we use the identity.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)


def sort_objects_in_import(import_statement):
    """Return `import_statement` with the object names inside its brackets sorted."""

    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return F'[{imports}]'
        keys = [part.strip().replace('"', '') for part in imports.split(',')]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([F'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split('\n')
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == '[' else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_pairs = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_pairs]
        return '\n'.join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', '') for part in lines[1].split(',')]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([F'"{k}"' for k in sort_objects(keys)])
        return '\n'.join(lines)
    else:
        # Finally we have to deal with imports fitting on one line
        return _re_bracket_content.sub(_replace, import_statement)


def sort_imports(file, check_only=True):
    """Sort the `_import_structure` of one init file.

    Returns True when the file would change and `check_only` is set; otherwise
    rewrites the file in place (or returns None when nothing needs sorting).
    """
    with open(file, 'r') as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt='_import_structure = {', end_prompt='if TYPE_CHECKING:'
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split('\n')

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = '\n'.join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                sorted_block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(sorted_block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = '\n'.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != '\n'.join(main_blocks):
        if check_only:
            return True
        else:
            print(F'Overwriting {file}.')
            with open(file, 'w') as f:
                f.write('\n'.join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    """Run `sort_imports` on every `__init__.py` under `PATH_TO_DIFFUSERS`.

    Raises ValueError listing how many files would change when `check_only` is set.
    """
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, '__init__.py'), check_only=check_only)
            if result:
                # Accumulate every offending file (the original overwrote the list).
                failures += [os.path.join(root, '__init__.py')]
    if len(failures) > 0:
        raise ValueError(F'Would overwrite {len(failures)} files, run `make style`.')


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()
    sort_imports_in_all_inits(check_only=args.check_only)
| 701 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
# Lazy-import structure for the Mega model. The original assigned both the
# structure dict and the torch symbol list to the same shadowed name and then
# referenced an undefined `_import_structure`; build a single dict and install
# the lazy module proxy into sys.modules as the lazy-init pattern requires.
_import_structure = {
    'configuration_mega': ['MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MegaConfig', 'MegaOnnxConfig'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Model symbols are only exposed when torch is installed.
    _import_structure['modeling_mega'] = [
        'MEGA_PRETRAINED_MODEL_ARCHIVE_LIST',
        'MegaForCausalLM',
        'MegaForMaskedLM',
        'MegaForMultipleChoice',
        'MegaForQuestionAnswering',
        'MegaForSequenceClassification',
        'MegaForTokenClassification',
        'MegaModel',
        'MegaPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static imports for type checkers / IDEs; must mirror _import_structure.
    from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mega import (
            MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
            MegaForCausalLM,
            MegaForMaskedLM,
            MegaForMultipleChoice,
            MegaForQuestionAnswering,
            MegaForSequenceClassification,
            MegaForTokenClassification,
            MegaModel,
            MegaPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy imports happen on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 8 | 0 |
'''simple docstring'''
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
# Map of fairseq WavLM parameter prefixes to HF WavLM attribute paths.
# The original assigned both this dict and the list below to the same name
# (`SCREAMING_SNAKE_CASE`), so the list shadowed the dict while later code
# referenced `MAPPING`; the intended names are restored.
MAPPING = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
    'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
    'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'quantizer.weight_proj': 'quantizer.weight_proj',
    'quantizer.vars': 'quantizer.codevectors',
    'project_q': 'project_q',
    'final_proj': 'project_hid',
    'w2v_encoder.proj': 'ctc_proj',
    'mask_emb': 'masked_spec_embed',
}
# Keys that live at the top level of the HF model rather than inside the encoder.
TOP_LEVEL_KEYS = [
    'ctc_proj',
    'quantizer.weight_proj',
    'quantizer.codevectors',
    'project_q',
    'project_hid',
]
def lowercase_ ( hf_pointer , key , value , full_name , weight_type ) -> None:
    """Copy a fairseq tensor `value` into the HF module attribute addressed by dotted `key`.

    Args:
        hf_pointer: Root HF module to descend into.
        key: Dotted attribute path (e.g. ``encoder.layers.0.attention.k_proj``).
        value: Tensor to copy; its shape must match the target.
        full_name: Original fairseq parameter name, used for logging.
        weight_type: One of ``weight``/``weight_g``/``weight_v``/``bias`` or None
            (None writes directly into the pointed-at tensor's ``data``).
    """
    # NOTE: the original signature repeated `__A` for every parameter (a
    # SyntaxError) and assigned values to throwaway locals; restored here.
    for attribute in key.split('''.''' ):
        hf_pointer =getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape =getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape =hf_pointer.shape
    assert hf_shape == value.shape, (
        F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        F' {value.shape} for {full_name}'
    )
    if weight_type == "weight":
        hf_pointer.weight.data =value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data =value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data =value
    elif weight_type == "bias":
        hf_pointer.bias.data =value
    else:
        hf_pointer.data =value
    # Relies on the module-level `logger` (mangled upstream to SCREAMING_SNAKE_CASE).
    logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def recursively_load_weights(fairseq_model, hf_model):
    """Map every tensor of the fairseq state dict onto ``hf_model``.

    Conv feature-extractor tensors are routed through ``load_conv_layer``;
    everything else is matched against the module-level ``MAPPING`` table and
    copied with ``set_recursively``. Unmatched names are collected and logged.

    The original block had both parameters named ``__A`` (a SyntaxError) and
    assigned its flags/keys to dead locals; names restored from body references.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        # Layer index sits just before the matched key fragment.
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one ``conv_layers.*`` tensor into the HF feature extractor.

    Args:
        full_name: full fairseq parameter name ("...conv_layers.<id>.<type>...").
        value: tensor to copy.
        feature_extractor: HF feature extractor owning ``conv_layers``.
        unused_weights: list collecting unmapped names (mutated in place).
        use_group_norm: True when the fairseq model uses group norm — then
            only conv layer 0 carries a norm module.

    The original block had five parameters all named ``__A`` (a SyntaxError)
    and assigned every tensor to a dead local; names restored from body
    references. The failure messages also indexed ``feature_extractor[layer_id]``
    directly, which would raise TypeError instead of showing the shape — fixed.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        # type 0 = the convolution itself.
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        # type 2 = the norm attached to this conv layer.
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    """Convert a fairseq WavLM checkpoint into a HF ``WavLMModel`` and save it.

    Args:
        checkpoint_path: path to the fairseq ``.pt`` checkpoint.
        pytorch_dump_folder_path: output directory for the HF model.
        config_path: optional path to a HF ``config.json``; defaults used otherwise.

    Runs under ``torch.no_grad()`` since we only copy weights. Name and
    parameters restored from the ``__main__`` guard below, which calls
    ``convert_wavlm_checkpoint(args.checkpoint_path, ...)``.
    """
    # Load and freeze the original fairseq model.
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    # Build the target HF model (custom config if provided, defaults otherwise).
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config)

    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    # CLI entry point: convert a fairseq WavLM checkpoint to a HF WavLMModel.
    # NOTE(review): the assignments below target SCREAMING_SNAKE_CASE while
    # later lines read `parser` / `args` — this looks machine-obfuscated and
    # will raise NameError at runtime; confirm against the upstream script
    # (which uses `parser = argparse.ArgumentParser()` / `args = parser.parse_args()`).
    SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    SCREAMING_SNAKE_CASE = parser.parse_args()
    convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 702 |
'''simple docstring'''
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
# Module-level tokenizer resources for HerBERT.
# NOTE(review): every constant below is bound to the same obfuscated name
# SCREAMING_SNAKE_CASE, so each assignment shadows the previous one and the
# class body's references (VOCAB_FILES_NAMES etc.) are left undefined —
# upstream names are logger / VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP /
# PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES / PRETRAINED_INIT_CONFIGURATION.
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
SCREAMING_SNAKE_CASE = {
    'vocab_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json'
    },
    'merges_file': {
        'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt'
    },
}
SCREAMING_SNAKE_CASE = {'allegro/herbert-base-cased': 514}
SCREAMING_SNAKE_CASE = {}
class UpperCAmelCase_ ( PreTrainedTokenizerFast ):
    """Fast (Rust-backed) tokenizer for HerBERT.

    Uses BERT-style pre-tokenization followed by BPE; special tokens follow
    the XLM scheme (``<s> seq </s>``).

    The original block was unloadable: ``__init__`` and every method reused a
    single parameter name (a SyntaxError), a ``super()`` call passed the same
    keyword twice, the five class attributes all shared one name (shadowing
    each other), and all four methods were named ``A__`` (only the last
    survived). Names are restored to the standard tokenizer API that
    ``PreTrainedTokenizerFast`` (imported above) dispatches to.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        """Build the fast tokenizer from vocab/merges or a serialized tokenizer file."""
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Return ``<s> A </s>`` for one sequence, ``<s> A </s> B </s>`` for a pair."""
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: 0 for ``<s> A </s>``, 1 for the optional ``B </s>`` part."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Persist the underlying tokenizer model files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 8 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# NOTE(review): both constants below are bound to the same obfuscated name,
# so the logger is immediately shadowed by the pretrained-config-archive map
# (upstream names: `logger` and `TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP`).
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
    'microsoft/table-transformer-detection': (
        'https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json'
    ),
}
class UpperCAmelCase_ ( PretrainedConfig ):
    """Configuration for the Table Transformer, a DETR-style detection model.

    Stores encoder/decoder dimensions, backbone selection and the Hungarian
    matcher / loss coefficients.

    The original block was unloadable: every ``__init__`` parameter shared the
    name ``UpperCAmelCase`` (a SyntaxError), all attribute assignments went to
    a dead local instead of ``self``, and the three class attributes shadowed
    each other under one name. Parameter and attribute names are restored from
    the body's own references (``backbone_config``, ``use_timm_backbone`` …)
    and from the properties below, which read ``self.encoder_attention_heads``
    and ``self.d_model``.
    """

    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        """Validate the backbone choice, resolve `backbone_config`, then store all hyper-parameters."""
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                # Re-hydrate a plain dict into the proper backbone config class.
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        """Alias kept for the generic HF API (see ``attribute_map``)."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        """Alias kept for the generic HF API (see ``attribute_map``)."""
        return self.d_model
class UpperCAmelCase_ ( __A ):
    """ONNX export settings for this model: expected graph inputs with their
    dynamic axes, the validation tolerance, and the minimum supported opset.

    NOTE(review): the three properties all share the obfuscated name ``A__``
    (later definitions shadow earlier ones) — preserved as found.
    """

    UpperCamelCase_ = version.parse('''1.11''' )

    @property
    def A__ ( self : List[str] ) -> Mapping[str, Mapping[int, str]]:
        '''Dynamic-axis layout of the exported graph inputs.'''
        dynamic_axes = {
            '''pixel_values''': {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''},
            '''pixel_mask''': {0: '''batch'''},
        }
        return OrderedDict(dynamic_axes.items() )

    @property
    def A__ ( self : str ) -> float:
        '''Absolute tolerance used when validating exported outputs.'''
        return 1e-5

    @property
    def A__ ( self : List[str] ) -> int:
        '''Default (minimum) ONNX opset version.'''
        return 12
| 703 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class UpperCAmelCase_ ( TestCasePlus ):
    """Offline-mode regression tests.

    Each test composes a small Python program out of three snippets —
    ``load`` (imports, executed before sockets are patched), ``run`` (the
    actual model loading) and ``mock`` (monkey-patches ``socket.socket`` to
    simulate no network) — and runs it in a fresh subprocess so that
    ``TRANSFORMERS_OFFLINE`` and the socket patch apply from interpreter
    start-up.

    The original block was broken by obfuscation: the snippet locals were
    assigned to ``lowercase`` but read back as ``load``/``run``/``mock``/
    ``mname``/``cmd``/``env``, undefined ``UpperCAmelCase`` was passed as
    argument values, and all five methods shared the name ``A__`` so four
    were shadowed. Names restored from the bodies' own references.
    """

    @require_torch
    def test_offline_mode(self):
        """A cached model must load with TRANSFORMERS_OFFLINE=1 while any socket use raises."""
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
        """
        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
        """
        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
        """

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        """A cached model must survive a flaky network (socket.error on connect)."""
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
        """
        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
        """
        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
        """

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        """Sharded checkpoints must also load from the cache in offline mode."""
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
        """
        run = """
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
        """
        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
        """

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        """`pipeline` without an explicit task must fail clearly in offline mode."""
        load = """
from transformers import pipeline
        """
        run = """
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
        """
        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
        """
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        # check=False: the subprocess is *expected* to exit non-zero here.
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        """Models with remote code (trust_remote_code=True) must work offline once cached."""
        load = """
from transformers import AutoModel
        """
        run = """
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
        """

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
| 8 | 0 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class UpperCAmelCase_ ( TestCasePlus ):
    """Offline-mode regression tests (duplicate copy of the class above in
    this concatenated file).

    Each test composes a small Python program out of three snippets —
    ``load`` (imports, executed before sockets are patched), ``run`` (the
    actual model loading) and ``mock`` (monkey-patches ``socket.socket`` to
    simulate no network) — and runs it in a fresh subprocess so that
    ``TRANSFORMERS_OFFLINE`` and the socket patch apply from interpreter
    start-up.

    The original block was broken by obfuscation: snippet locals were
    assigned to ``lowercase`` but read back under their real names, undefined
    ``UpperCAmelCase`` was passed as argument values, and all five methods
    shared the name ``A__``. Names restored from the bodies' own references.
    """

    @require_torch
    def test_offline_mode(self):
        """A cached model must load with TRANSFORMERS_OFFLINE=1 while any socket use raises."""
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
        """
        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
        """
        mock = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn't access internet")
socket.socket = offline_socket
        """

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_no_internet(self):
        """A cached model must survive a flaky network (socket.error on connect)."""
        load = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
        """
        run = """
mname = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task="fill-mask", model=mname)
print("success")
        """
        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")
socket.socket = offline_socket
        """

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        """Sharded checkpoints must also load from the cache in offline mode."""
        load = """
from transformers import BertConfig, BertModel, BertTokenizer
        """
        run = """
mname = "hf-internal-testing/tiny-random-bert-sharded"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print("success")
        """
        mock = """
import socket
def offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")
socket.socket = offline_socket
        """

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

    @require_torch
    def test_offline_mode_pipeline_exception(self):
        """`pipeline` without an explicit task must fail clearly in offline mode."""
        load = """
from transformers import pipeline
        """
        run = """
mname = "hf-internal-testing/tiny-random-bert"
pipe = pipeline(model=mname)
        """
        mock = """
import socket
def offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")
socket.socket = offline_socket
        """
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        # check=False: the subprocess is *expected* to exit non-zero here.
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )

    @require_torch
    def test_offline_model_dynamic_model(self):
        """Models with remote code (trust_remote_code=True) must work offline once cached."""
        load = """
from transformers import AutoModel
        """
        run = """
mname = "hf-internal-testing/test_dynamic_model"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print("success")
        """

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
| 704 |
'''simple docstring'''
SCREAMING_SNAKE_CASE = 'Alexander Joslin'
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized arithmetic expression with Dijkstra's
    two-stack algorithm.

    Args:
        equation: e.g. "(5 + ((4 * 2) * (2 + 3)))". Operands are parsed one
            character at a time, so only single-digit numbers are supported.

    Returns:
        The value left on the operand stack after the final ')'.

    Name and locals restored: the original assigned every local to
    ``lowercase`` and then read ``operators``/``operand_stack``/
    ``operator_stack`` (undefined), and the ``__main__`` guard called
    ``dijkstras_two_stack_algorithm`` which did not exist.
    """
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: push operands onto the operand stack.
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators onto the operator stack.
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: on ')', pop one operator and two operands, apply, push result.
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            # num2 was pushed first, so it is the left operand (matters for - and /).
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5: the result is the only value left on the operand stack.
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
| 8 | 0 |
'''simple docstring'''
def euclidean_gcd(a: int, b: int) -> int:
    """Return gcd(a, b) via the iterative Euclidean algorithm.

    Fixes two defects: the original declared both parameters as ``__A`` (a
    SyntaxError) and assigned the update tuple to a throwaway local instead
    of ``a, b``, so it always returned the first argument unchanged. Renamed
    to ``euclidean_gcd``, the name the demo ``main`` below calls.
    """
    while b:
        a, b = b, a % b
    return a
def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Return gcd(a, b) via the recursive Euclidean algorithm.

    The original declared both parameters as ``__A`` (a SyntaxError) and its
    recursive call already targeted the name ``euclidean_gcd_recursive`` —
    restored accordingly (this is also the name the demo ``main`` calls).
    """
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)
def main() -> None:
    """Demo: print gcd results for sample pairs (iterative and recursive).

    Renamed from the obfuscated ``lowercase_`` to ``main`` — the name the
    ``__main__`` guard below invokes.
    """
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
| 705 |
'''simple docstring'''
import re
def indian_phone_validator(phone: str) -> bool:
    """Return True iff *phone* is a valid Indian mobile number.

    Accepts an optional "+91" prefix (with an optional dash/space), an
    optional leading 0 and/or "91", followed by a 10-digit number starting
    with 7, 8 or 9.

    Fixes a real bug: the original compiled the pattern into a local it
    never used and then called ``re.search(phone, phone)`` — treating the
    phone number itself as the regex (``re.error`` on a leading '+').
    Renamed to ``indian_phone_validator``, the name the guard below calls.
    """
    pat = re.compile(r"^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$")
    # The pattern is anchored, so a search hit means the whole string matched.
    if match := re.search(pat, phone):
        return match.string == phone
    return False


if __name__ == "__main__":
    print(indian_phone_validator("+918827897895"))
| 8 | 0 |
'''simple docstring'''
import heapq
def greedy_min_vertex_cover(graph: dict) -> set[int]:
    """Greedy APX algorithm for minimum vertex cover.

    Repeatedly picks the vertex of highest remaining degree, adds it to the
    cover and deletes its incident edges, until no edges remain.

    Args:
        graph: adjacency lists, e.g. {0: [1, 3], 1: [0, 3], ...}.

    Returns:
        The set of chosen vertices (a vertex cover, not necessarily minimum).

    Fixes real bugs in the original: it pushed onto / popped from /
    heapified the *graph* argument instead of the local queue, and measured
    ``len(graph)`` instead of each adjacency list's length.

    >>> greedy_min_vertex_cover({0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]})
    {0, 1, 2, 4}
    """
    queue: list[list] = []

    # Fill a max-priority queue keyed on degree; heapq is a min-heap, hence
    # the -len(adjacency) trick.
    for key, value in graph.items():
        # O(log(n))
        heapq.heappush(queue, [-1 * len(value), (key, value)])

    chosen_vertices = set()

    # While edges remain (queue[0][0] is the negated max remaining degree).
    while queue and queue[0][0] != 0:
        # Extract the vertex with max remaining degree and add it to the cover.
        argmax = heapq.heappop(queue)[1][0]
        chosen_vertices.add(argmax)

        # Remove all edges incident to argmax and update the other degrees.
        for elem in queue:
            # Vertices with no remaining neighbours can be skipped.
            if elem[0] == 0:
                continue
            if argmax in elem[1][1]:
                index = elem[1][1].index(argmax)
                del elem[1][1][index]
                elem[0] += 1
        # Restore the heap invariant after in-place priority changes.
        heapq.heapify(queue)
    return chosen_vertices


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
    print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
| 706 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class UpperCAmelCase_ :
"""simple docstring"""
def __init__( self : Optional[Any] , UpperCAmelCase : str , UpperCAmelCase : Union[str, Any]=13 , UpperCAmelCase : int=7 , UpperCAmelCase : Union[str, Any]=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=True , UpperCAmelCase : int=True , UpperCAmelCase : Any=99 , UpperCAmelCase : List[Any]=32 , UpperCAmelCase : str=2 , UpperCAmelCase : str=4 , UpperCAmelCase : List[Any]=37 , UpperCAmelCase : str="gelu" , UpperCAmelCase : List[Any]=0.1 , UpperCAmelCase : int=0.1 , UpperCAmelCase : Dict=512 , UpperCAmelCase : List[Any]=16 , UpperCAmelCase : List[str]=2 , UpperCAmelCase : Any=0.0_2 , UpperCAmelCase : List[str]=3 , UpperCAmelCase : Dict=4 , UpperCAmelCase : Optional[int]=None , ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =parent
lowercase : Tuple =13
lowercase : Any =7
lowercase : Union[str, Any] =True
lowercase : Any =True
lowercase : Optional[int] =True
lowercase : List[str] =True
lowercase : Tuple =99
lowercase : str =32
lowercase : Union[str, Any] =2
lowercase : Dict =4
lowercase : Union[str, Any] =37
lowercase : Union[str, Any] ='''gelu'''
lowercase : Any =0.1
lowercase : Dict =0.1
lowercase : Dict =512
lowercase : List[str] =16
lowercase : Dict =2
lowercase : int =0.0_2
lowercase : List[Any] =3
lowercase : List[str] =4
lowercase : Optional[Any] =None
def A__ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
lowercase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : str =None
if self.use_input_mask:
lowercase : int =random_attention_mask([self.batch_size, self.seq_length] )
lowercase : Any =None
if self.use_token_type_ids:
lowercase : List[str] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowercase : List[Any] =None
lowercase : List[str] =None
lowercase : List[str] =None
if self.use_labels:
lowercase : Optional[Any] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Any =ids_tensor([self.batch_size] , self.num_choices )
lowercase : List[Any] =RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=UpperCAmelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A__ ( self : List[str] , UpperCAmelCase : int , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerModel(config=UpperCAmelCase )
lowercase : Optional[int] ={'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
lowercase : Tuple =[input_ids, input_mask]
lowercase : str =model(UpperCAmelCase )
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self : Tuple , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : Any , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] ) -> Any:
'''simple docstring'''
lowercase : Dict =True
lowercase : List[Any] =TFRoFormerForCausalLM(config=UpperCAmelCase )
lowercase : Union[str, Any] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[Any] =model(UpperCAmelCase )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def A__ ( self : List[Any] , UpperCAmelCase : Dict , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Dict , UpperCAmelCase : int , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Tuple ) -> Dict:
'''simple docstring'''
lowercase : List[Any] =TFRoFormerForMaskedLM(config=UpperCAmelCase )
lowercase : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self : Tuple , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[int] , UpperCAmelCase : Dict , UpperCAmelCase : Tuple , UpperCAmelCase : int , UpperCAmelCase : int ) -> Optional[Any]:
'''simple docstring'''
lowercase : Optional[Any] =self.num_labels
lowercase : Optional[int] =TFRoFormerForSequenceClassification(config=UpperCAmelCase )
lowercase : Optional[int] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : Optional[Any] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self : Optional[Any] , UpperCAmelCase : int , UpperCAmelCase : Any , UpperCAmelCase : str , UpperCAmelCase : Dict , UpperCAmelCase : List[str] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : Tuple ) -> Union[str, Any]:
'''simple docstring'''
lowercase : int =self.num_choices
lowercase : Tuple =TFRoFormerForMultipleChoice(config=UpperCAmelCase )
lowercase : Union[str, Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : Tuple =tf.tile(tf.expand_dims(UpperCAmelCase , 1 ) , (1, self.num_choices, 1) )
lowercase : List[Any] ={
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
lowercase : Dict =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def A__ ( self : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : Dict , UpperCAmelCase : List[Any] , UpperCAmelCase : List[str] , UpperCAmelCase : Any , UpperCAmelCase : List[str] , UpperCAmelCase : int ) -> Optional[int]:
'''simple docstring'''
lowercase : List[Any] =self.num_labels
lowercase : Union[str, Any] =TFRoFormerForTokenClassification(config=UpperCAmelCase )
lowercase : Tuple ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self : int , UpperCAmelCase : int , UpperCAmelCase : Dict , UpperCAmelCase : Optional[int] , UpperCAmelCase : Union[str, Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : List[Any] , UpperCAmelCase : str ) -> Any:
'''simple docstring'''
lowercase : Tuple =TFRoFormerForQuestionAnswering(config=UpperCAmelCase )
lowercase : List[str] ={
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
lowercase : List[str] =model(UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
lowercase : Optional[Any] =self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) : Optional[int] =config_and_inputs
lowercase : str ={'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class UpperCAmelCase_(__A, __A, unittest.TestCase):
    """Common model/pipeline test suite for the TF RoFormer family.

    NOTE(review): the class attributes below all share one (obfuscated) name,
    so only the last assignment survives, and the mixin bases (``__A``) are
    defined outside this chunk — both left untouched to avoid changing the
    class interface. Likewise every method keeps its ``A__`` name.
    """

    UpperCamelCase_ = (
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    UpperCamelCase_ = (
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    UpperCamelCase_ = False
    UpperCamelCase_ = False

    def A__(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
        """Skip text-generation pipeline tests for this model family.

        fix: the original declared five identically-named parameters (a
        SyntaxError) and then read an undefined name in the body.
        """
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False

    def A__(self):
        """Test setup: build the model tester and config tester used below."""
        # fix: the original assigned both to a throwaway local, leaving
        # ``self.model_tester`` / ``self.config_tester`` undefined; the
        # config_class argument also referenced an undefined name.
        self.model_tester = TFRoFormerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RoFormerConfig, hidden_size=37)

    def A__(self):
        self.config_tester.run_common_tests()

    def A__(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def A__(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def A__(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs)

    def A__(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def A__(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def A__(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def A__(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def A__(self):
        """Smoke-test loading the pretrained checkpoint from the hub."""
        model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
        self.assertIsNotNone(model)
@require_tf
class UpperCAmelCase_(unittest.TestCase):
    """Integration test: masked-LM head outputs on a pretrained checkpoint."""

    @slow
    def A__(self):
        # fix: the original assigned every local to one throwaway name and
        # then read ``input_ids`` / ``vocab_size`` / the expected shape and
        # slice as undefined names.
        model = TFRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        # TODO Replace vocab size
        vocab_size = 50000
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape, expected_shape)

        print(output[:, :3, :3])

        # TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
            [
                [
                    [-0.12053341, -1.0264901, 0.29221946],
                    [-1.5133783, 0.197433, 0.15190607],
                    [-5.0135403, -3.900256, -0.84038764],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
@require_tf
class UpperCAmelCase_(unittest.TestCase):
    """Unit tests for TFRoFormerSinusoidalPositionalEmbedding weights."""

    UpperCamelCase_ = 1e-4
    # fix: the methods below read ``self.tolerance``, but the attribute was
    # mangled to the name above; keep both so existing readers still work.
    tolerance = 1e-4

    def A__(self):
        """Check embedding values produced for a small (6, 6) table."""
        input_ids = tf.constant([[4, 10]])
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6)
        emba_output = emba(input_ids.shape)
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]]
        )
        tf.debugging.assert_near(emba_output, desired_weights, atol=self.tolerance)

    def A__(self):
        """Check the first rows/columns of a large (512, 512) table."""
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ]
        )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512)
        emba([2, 16, 512])
        weights = emba.weight[:3, :5]
        tf.debugging.assert_near(weights, desired_weights, atol=self.tolerance)
@require_tf
class UpperCAmelCase_(unittest.TestCase):
    """Unit test for TFRoFormerSelfAttention.apply_rotary_position_embeddings."""

    UpperCamelCase_ = 1e-4
    # fix: the method below reads ``self.tolerance``, but the attribute was
    # mangled to the name above; keep both so existing readers still work.
    tolerance = 1e-4

    def A__(self):
        # 2, 12, 16, 64 are the (batch, heads, seq_len, head_dim) of the
        # synthetic query/key tensors.
        # fix: ``tf.floataa`` does not exist (mangled ``tf.float32``), and the
        # rotary-embedding call's arguments/results were undefined names.
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100

        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]

        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )

        desired_query_layer = tf.constant(
            [
                [0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
                [-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
                [-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
                [-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
                [0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
                [3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
            ]
        )
        desired_key_layer = tf.constant(
            [
                [0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
                [0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
                [1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
                [2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
                [-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
                [-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
            ]
        )

        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
| 8 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.