| code (string, 82–53.2k chars) | code_codestyle (int64, 0–721) | style_context (string, 91–41.9k chars) | style_context_codestyle (int64, 0–699) | label (int64, 0–1) |
|---|---|---|---|---|
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class snake_case_ ( __lowercase ):
__lowerCAmelCase = '''trocr'''
__lowerCAmelCase = ['''past_key_values''']
__lowerCAmelCase = {
'''num_attention_heads''': '''decoder_attention_heads''',
'''hidden_size''': '''d_model''',
'''num_hidden_layers''': '''decoder_layers''',
}
def __init__( self , a_=5_0_2_6_5 , a_=1_0_2_4 , a_=1_2 , a_=1_6 , a_=4_0_9_6 , a_="gelu" , a_=5_1_2 , a_=0.1 , a_=0.0 , a_=0.0 , a_=2 , a_=0.02 , a_=0.0 , a_=True , a_=False , a_=True , a_=True , a_=1 , a_=0 , a_=2 , **a_ , ):
a_ : Optional[Any] = vocab_size
a_ : Union[str, Any] = d_model
a_ : List[Any] = decoder_layers
a_ : Optional[Any] = decoder_attention_heads
a_ : Optional[int] = decoder_ffn_dim
a_ : Optional[int] = activation_function
a_ : Dict = max_position_embeddings
a_ : Optional[int] = dropout
a_ : str = attention_dropout
a_ : List[str] = activation_dropout
a_ : str = init_std
a_ : List[str] = decoder_layerdrop
a_ : Any = use_cache
a_ : int = scale_embedding
a_ : List[Any] = use_learned_position_embeddings
a_ : int = layernorm_embedding
super().__init__(
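# Minimal usage sketch (illustrative, not part of the original file): the
# attribute_map above aliases generic config names onto the decoder fields.
if __name__ == "__main__":
    config = TrOCRConfig(d_model=512, decoder_layers=6)
    assert config.hidden_size == 512
    assert config.num_hidden_layers == 6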
| 237 |
"""Matthews correlation coefficient metric."""
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = "\nCompute the Matthews correlation coefficient (MCC)\n\nThe Matthews correlation coefficient is used in machine learning as a\nmeasure of the quality of binary and multiclass classifications. It takes\ninto account true and false positives and negatives and is generally\nregarded as a balanced measure which can be used even if the classes are of\nvery different sizes. The MCC is in essence a correlation coefficient value\nbetween -1 and +1. A coefficient of +1 represents a perfect prediction, 0\nan average random prediction and -1 an inverse prediction. The statistic\nis also known as the phi coefficient. [source: Wikipedia]\n"
_KWARGS_DESCRIPTION = "\nArgs:\n    predictions (list of int): Predicted labels, as returned by a model.\n    references (list of int): Ground truth labels.\n    sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.\nReturns:\n    matthews_correlation (dict containing float): Matthews correlation.\nExamples:\n    Example 1, a basic example with only predictions and references as inputs:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3])\n        >>> print(round(results['matthews_correlation'], 2))\n        0.54\n\n    Example 2, the same example as above, but also including sample weights:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3],\n        ...                                     sample_weight=[0.5, 3, 1, 1, 1, 2])\n        >>> print(round(results['matthews_correlation'], 2))\n        0.1\n\n    Example 3, the same example as above, but with sample weights that cause a negative correlation:\n        >>> matthews_metric = datasets.load_metric(\"matthews_correlation\")\n        >>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],\n        ...                                     predictions=[1, 2, 2, 0, 3, 3],\n        ...                                     sample_weight=[0.5, 1, 0, 0, 0, 1])\n        >>> print(round(results['matthews_correlation'], 2))\n        -0.25\n"
_CITATION = "\\n@article{scikit-learn,\n  title={Scikit-learn: Machine Learning in {P}ython},\n  author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n         and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n         and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n         Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n  journal={Journal of Machine Learning Research},\n  volume={12},\n  pages={2825--2830},\n  year={2011}\n}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class MatthewsCorrelation(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=[
                "https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
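# Usage sketch mirroring Example 1 from _KWARGS_DESCRIPTION above
# (load_metric is the legacy datasets entry point; newer code uses the
# `evaluate` library instead):
if __name__ == "__main__":
    matthews_metric = datasets.load_metric("matthews_correlation")
    results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2], predictions=[1, 2, 2, 0, 3, 3])
    print(round(results["matthews_correlation"], 2))  # 0.54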
| 26 | 0 |
import inspect
import unittest
from transformers import RegNetConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from transformers.utils import cached_property, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.models.regnet.modeling_flax_regnet import FlaxRegNetForImageClassification, FlaxRegNetModel
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FlaxRegNetModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=3,
        image_size=32,
        num_channels=3,
        embeddings_size=10,
        hidden_sizes=[10, 20, 30, 40],
        depths=[1, 1, 2, 1],
        is_training=True,
        use_labels=True,
        hidden_act="relu",
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.embeddings_size = embeddings_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.scope = scope
        self.num_stages = len(hidden_sizes)

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return RegNetConfig(
            num_channels=self.num_channels,
            embeddings_size=self.embeddings_size,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            hidden_act=self.hidden_act,
            num_labels=self.num_labels,
            image_size=self.image_size,
        )

    def create_and_check_model(self, config, pixel_values):
        model = FlaxRegNetModel(config=config)
        result = model(pixel_values)
        # Output shape (b, c, h, w)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.num_labels
        model = FlaxRegNetForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxRegNetModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxRegNetModel, FlaxRegNetForImageClassification) if is_flax_available() else ()

    is_encoder_decoder = False
    test_head_masking = False
    has_attentions = False

    def setUp(self) -> None:
        self.model_tester = FlaxRegNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RegNetConfig, has_text_modality=False)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @unittest.skip(reason="RegNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="RegNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_flax
class FlaxRegNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/regnet-y-040") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FlaxRegNetForImageClassification.from_pretrained("facebook/regnet-y-040")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="np")

        outputs = model(**inputs)

        # verify the logits
        expected_shape = (1, 1000)
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = jnp.array([-0.4180, -1.5051, -3.4836])
        self.assertTrue(jnp.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
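# Self-contained sketch (illustrative, assumes flax is installed) of the
# JIT-consistency pattern that test_jit_compilation exercises above:
# compiled and eager execution must agree in shape and value.
def _jit_consistency_demo():
    @jax.jit
    def double_tanh(x):
        return jnp.tanh(x) * 2.0

    x = jnp.arange(4.0)
    with jax.disable_jit():
        eager_out = double_tanh(x)  # runs op-by-op
    jit_out = double_tanh(x)  # runs the compiled version
    assert eager_out.shape == jit_out.shape
    assert jnp.allclose(eager_out, jit_out)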
| 608 |
from __future__ import annotations

COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def couloumbs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    # Solve Coulomb's law for whichever one of the four quantities is passed as 0.
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if distance < 0:
        raise ValueError("Distance cannot be negative")
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError("Exactly one argument must be 0")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
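# Usage sketch (example values chosen here for illustration, not from the
# original file):
if __name__ == "__main__":
    # Solve for the force between two 1 C charges 1 m apart.
    print(couloumbs_law(force=0, charge1=1, charge2=1, distance=1))
    # -> {'force': 8988000000.0}

    # Solve for the distance at which those charges would feel 1 N.
    print(couloumbs_law(force=1, charge1=1, charge2=1, distance=0))
    # -> {'distance': 94805.06...}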
| 608 | 1 |
"""Feature extractor class for BEiT."""
import warnings

from ...utils import logging
from .image_processing_beit import BeitImageProcessor

logger = logging.get_logger(__name__)


class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 342 |
"""Segmented sieve of Eratosthenes."""
import math


def sieve(n: int) -> list[int]:
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)
    return prime


if __name__ == "__main__":
    print(sieve(10**6))
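# Sanity checks (known values: the primes below 30, and the classical prime
# count pi(10**6) = 78498). The segmented variant only ever holds O(sqrt(n))
# booleans at a time, which is the point of segmenting.
if __name__ == "__main__":
    assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
    assert len(sieve(10**6)) == 78498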
| 342 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class CTRLConfig(PretrainedConfig):
    model_type = "ctrl"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=246534,
        n_positions=256,
        n_embd=1280,
        dff=8192,
        n_layer=48,
        n_head=16,
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        layer_norm_epsilon=1e-6,
        initializer_range=0.02,
        use_cache=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        super().__init__(**kwargs)
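# Minimal usage sketch (illustrative, not part of the original file): the
# attribute_map resolves generic names to CTRL's historical field names.
if __name__ == "__main__":
    config = CTRLConfig(n_layer=2, n_head=4)
    assert config.num_hidden_layers == 2
    assert config.num_attention_heads == 4
    assert config.hidden_size == config.n_embd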
| 547 |
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1000000) -> int:
    """
    Return the numerator of the fraction immediately to the left of
    numerator/denominator when listing fractions with denominators up to
    `limit` in ascending order (Project Euler 71).
    """
    max_numerator = 0
    max_denominator = 1

    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator


if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1000000))
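# Quick checks: with denominators up to 8 the fraction just left of 3/7 is
# 2/5, and the published Project Euler 71 answer for the full limit is 428570.
if __name__ == "__main__":
    assert solution(3, 7, 8) == 2
    assert solution(3, 7, 1000000) == 428570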
| 547 | 1 |
import argparse
import json
import pickle
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def get_maskformer_config(model_name: str):
    backbone_config = SwinConfig.from_pretrained(
        "microsoft/swin-tiny-patch4-window7-224", out_features=["stage1", "stage2", "stage3", "stage4"]
    )
    config = MaskFormerConfig(backbone_config=backbone_config)

    repo_id = "huggingface/label-files"
    if "ade20k-full" in model_name:
        # this should be ok
        config.num_labels = 847
        filename = "maskformer-ade20k-full-id2label.json"
    elif "ade" in model_name:
        # this should be ok
        config.num_labels = 150
        filename = "ade20k-id2label.json"
    elif "coco-stuff" in model_name:
        # this should be ok
        config.num_labels = 171
        filename = "maskformer-coco-stuff-id2label.json"
    elif "coco" in model_name:
        # TODO
        config.num_labels = 133
        filename = "coco-panoptic-id2label.json"
    elif "cityscapes" in model_name:
        # this should be ok
        config.num_labels = 19
        filename = "cityscapes-id2label.json"
    elif "vistas" in model_name:
        # this should be ok
        config.num_labels = 65
        filename = "mapillary-vistas-id2label.json"

    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.patch_embed.proj.weight''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.patch_embed.proj.bias''', '''model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.patch_embed.norm.weight''', '''model.pixel_level_module.encoder.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.patch_embed.norm.bias''', '''model.pixel_level_module.encoder.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.norm2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") )
rename_keys.append((f"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") )
if i < 3:
rename_keys.append((f"""backbone.layers.{i}.downsample.reduction.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.weight""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") )
rename_keys.append((f"""backbone.layers.{i}.downsample.norm.bias""", f"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") )
rename_keys.append((f"""backbone.norm{i}.weight""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") )
rename_keys.append((f"""backbone.norm{i}.bias""", f"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") )
# FPN
rename_keys.append(('''sem_seg_head.layer_4.weight''', '''model.pixel_level_module.decoder.fpn.stem.0.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.weight''', '''model.pixel_level_module.decoder.fpn.stem.1.weight''') )
rename_keys.append(('''sem_seg_head.layer_4.norm.bias''', '''model.pixel_level_module.decoder.fpn.stem.1.bias''') )
for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ):
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") )
rename_keys.append((f"""sem_seg_head.adapter_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.weight""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") )
rename_keys.append((f"""sem_seg_head.layer_{source_index}.norm.bias""", f"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") )
rename_keys.append(('''sem_seg_head.mask_features.weight''', '''model.pixel_level_module.decoder.mask_projection.weight''') )
rename_keys.append(('''sem_seg_head.mask_features.bias''', '''model.pixel_level_module.decoder.mask_projection.bias''') )
# Transformer decoder
for idx in range(config.decoder_config.decoder_layers ):
# self-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") )
# cross-attention out projection
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") )
# MLP 1
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") )
# MLP 2
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", f"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", f"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") )
# layernorm 1 (self-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", f"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") )
# layernorm 2 (cross-attention layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", f"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") )
# layernorm 3 (final layernorm)
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", f"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.weight''', '''model.transformer_module.decoder.layernorm.weight''') )
rename_keys.append(('''sem_seg_head.predictor.transformer.decoder.norm.bias''', '''model.transformer_module.decoder.layernorm.bias''') )
# heads on top
rename_keys.append(('''sem_seg_head.predictor.query_embed.weight''', '''model.transformer_module.queries_embedder.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.weight''', '''model.transformer_module.input_projection.weight''') )
rename_keys.append(('''sem_seg_head.predictor.input_proj.bias''', '''model.transformer_module.input_projection.bias''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.weight''', '''class_predictor.weight''') )
rename_keys.append(('''sem_seg_head.predictor.class_embed.bias''', '''class_predictor.bias''') )
for i in range(3 ):
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", f"""mask_embedder.{i}.0.weight""") )
rename_keys.append((f"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", f"""mask_embedder.{i}.0.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # fmt: off
    hidden_size = config.decoder_config.hidden_size
    for idx in range(config.decoder_config.decoder_layers):
        # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
        # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.transformer_module.decoder.layers.{idx}.encoder_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
    # fmt: on
def prepare_img() -> torch.Tensor:
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_maskformer_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak model's weights to our MaskFormer structure.
    """
    config = get_maskformer_config(model_name)

    # load original state_dict
    with open(checkpoint_path, "rb") as f:
        data = pickle.load(f)
    state_dict = data["model"]

    # for name, param in state_dict.items():
    #     print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # update to torch tensors
    for key, value in state_dict.items():
        state_dict[key] = torch.from_numpy(value)

    # load 🤗 model
    model = MaskFormerForInstanceSegmentation(config)
    model.eval()

    for name, param in model.named_parameters():
        print(name, param.shape)

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == [
        "model.pixel_level_module.encoder.model.layernorm.weight",
        "model.pixel_level_module.encoder.model.layernorm.bias",
    ]
    assert len(unexpected_keys) == 0, f"Unexpected keys: {unexpected_keys}"

    # verify results
    image = prepare_img()
    if "vistas" in model_name:
        ignore_index = 65
    elif "cityscapes" in model_name:
        ignore_index = 65535
    else:
        ignore_index = 255
    reduce_labels = True if "ade" in model_name else False
    image_processor = MaskFormerImageProcessor(ignore_index=ignore_index, reduce_labels=reduce_labels)

    inputs = image_processor(image, return_tensors="pt")

    outputs = model(**inputs)

    print("Logits:", outputs.class_queries_logits[0, :3, :3])

    if model_name == "maskformer-swin-tiny-ade":
        expected_logits = torch.tensor(
            [[3.6353, -4.4770, -2.6065], [0.5081, -4.2394, -3.5343], [2.1909, -5.0353, -1.9323]]
        )
        assert torch.allclose(outputs.class_queries_logits[0, :3, :3], expected_logits, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model and image processor to {pytorch_dump_folder_path}")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model and image processor to the hub...")
        model.push_to_hub(f"nielsr/{model_name}")
        image_processor.push_to_hub(f"nielsr/{model_name}")
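# Toy sketch (hypothetical keys, added for illustration) of the
# pop-and-reinsert mechanics rename_key uses above:
def _rename_key_demo():
    toy_state_dict = {"backbone.patch_embed.proj.weight": 1.0, "untouched": 2.0}
    rename_key(toy_state_dict, "backbone.patch_embed.proj.weight", "encoder.projection.weight")
    assert toy_state_dict == {"untouched": 2.0, "encoder.projection.weight": 1.0}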
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
# Required parameters
    parser.add_argument(
        "--model_name",
        default="maskformer-swin-tiny-ade",
        type=str,
        help="Name of the MaskFormer model you'd like to convert",
    )
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl''',
type=str,
help='''Path to the original state dict (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
A__ = parser.parse_args()
convert_maskformer_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 252 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae,
            text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0,
            noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0,
            noise_level=20, num_inference_steps=2, output_type="np", return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae,
            text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0,
            noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2,
            guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae,
            text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=image, generator=generator, output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(
            model_id, torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"

        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
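# Context sketch (illustrative, not part of the original file): the two public
# diffusers switches the last test relies on to bound GPU memory.
def _memory_savers_demo():
    pipe = StableDiffusionUpscalePipeline.from_pretrained(
        "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
    )
    pipe.enable_attention_slicing(1)      # compute attention one slice at a time
    pipe.enable_sequential_cpu_offload()  # keep only the active submodule on GPU
    return pipe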
| 252 | 1 |
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)

    # If row equals the size of the board, there is a queen in every row of the
    # current board (possible_board)
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results
    for col in range(n):
        # First we check that the current board (possible_board) does not already
        # contain this column value, because if it does there is a vertical
        # collision. Then we apply the two diagonal formulas:
        #
        # 45º:  y - x = b  or  row - col = b
        # 135º: y + x = b  or  row + col = b
        #
        # and verify that neither result already exists in its respective set
        # (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is True there is a collision, so we continue to
        # the next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # If it is False we call the dfs function again with updated inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n_queens_solution(4)
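# Known solution counts give a quick sanity check: 2 boards for n = 4 and
# 92 for n = 8.
def _count_solutions(n: int) -> int:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
    return len(boards)


if __name__ == "__main__":
    assert _count_solutions(4) == 2
    assert _count_solutions(8) == 92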
| 407 |
from math import isqrt, log2


def calculate_prime_numbers(max_number: int) -> list[int]:
    """Return all primes below max_number via a simple sieve of Eratosthenes."""
    is_prime = [True] * max_number
    for i in range(2, isqrt(max_number - 1) + 1):
        if is_prime[i]:
            for j in range(i**2, max_number, i):
                is_prime[j] = False

    return [i for i in range(2, max_number) if is_prime[i]]


def solution(base: int = 800800, degree: int = 800800) -> int:
    """
    Count hybrid integers p^q * q^p (p, q distinct primes) not exceeding
    base^degree, comparing logarithms with a two-pointer scan (Project Euler 800).
    """
    upper_bound = degree * log2(base)
    max_prime = int(upper_bound)
    prime_numbers = calculate_prime_numbers(max_prime)

    hybrid_integers_count = 0
    left = 0
    right = len(prime_numbers) - 1
    while left < right:
        while (
            prime_numbers[right] * log2(prime_numbers[left])
            + prime_numbers[left] * log2(prime_numbers[right])
            > upper_bound
        ):
            right -= 1
        hybrid_integers_count += right - left
        left += 1

    return hybrid_integers_count


if __name__ == "__main__":
    print(f"{solution() = }")
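# The Project Euler 800 statement supplies one verifiable data point:
# there are 10790 hybrid integers not exceeding 800^800.
if __name__ == "__main__":
    assert solution(800, 800) == 10790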
| 407 | 1 |
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class EulerDiscreteSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (EulerDiscreteScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1100,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)

        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 0.0002) < 1e-2
        assert abs(result_mean.item() - 2.2676e-06) < 1e-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 10.0807) < 1e-2
        assert abs(result_mean.item() - 0.0131) < 1e-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)

        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
        sample = sample.to(torch_device)

        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample, generator=generator)
            sample = output.prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 124.52299499511719) < 1e-2
        assert abs(result_mean.item() - 0.16213932633399963) < 1e-3
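# Standalone sketch (illustrative) of the denoising loop the tests above
# exercise, with random noise standing in for a real denoiser model.
def _euler_loop_demo():
    scheduler = EulerDiscreteScheduler(num_train_timesteps=1000)
    scheduler.set_timesteps(10)

    sample = torch.randn(1, 3, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        model_output = torch.randn_like(model_input)  # stand-in for model(model_input, t)
        sample = scheduler.step(model_output, t, sample).prev_sample
    return sample  # shape (1, 3, 8, 8)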
| 226 |
'''simple docstring'''
import json
import os
import re
import unittest
from transformers import CodeGenTokenizer, CodeGenTokenizerFast
from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class CodeGenTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CodeGenTokenizer
    rust_tokenizer_class = CodeGenTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"add_prefix_space": True}
    test_seq2seq = False
    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
            "<|endoftext|>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CodeGenTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = CodeGenTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["\u0120low", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text, add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)

        sequence = "lower newer"

        # Testing tokenization
        tokens = tokenizer.tokenize(sequence, add_prefix_space=True)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        # Testing conversion to ids without special tokens
        ids = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # Testing conversion to ids with special tokens
        rust_tokenizer = self.get_rust_tokenizer(add_prefix_space=True)
        ids = tokenizer.encode(sequence, add_prefix_space=True)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        # Testing the unknown token
        input_tokens = tokens + [rust_tokenizer.unk_token]
        input_bpe_tokens = [14, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
    def test_pretokenized_inputs(self, *args, **kwargs):
        # It's very difficult to mix/test pretokenization with byte-level
        # And get both CodeGen and Roberta to work at the same time (mostly an issue of adding a space before the string)
        pass

    def test_padding(self, max_length=15):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                # Simple input
                s = "This is a simple input"
                s2 = ["This is a simple input 1", "This is a simple input 2"]
                p = ("This is a simple input", "This is a pair")
                p2 = [
                    ("This is a simple input 1", "This is a simple input 2"),
                    ("This is a simple pair 1", "This is a simple pair 2"),
                ]

                # Simple input tests
                self.assertRaises(ValueError, tokenizer_r.encode, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, s, max_length=max_length, padding="max_length")

                # Simple input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, s2, max_length=max_length, padding="max_length",
                )

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(ValueError, tokenizer_r.encode_plus, p, max_length=max_length, padding="max_length")

                # Pair input
                self.assertRaises(
                    ValueError, tokenizer_r.batch_encode_plus, p2, max_length=max_length, padding="max_length",
                )
    def test_padding_if_pad_token_set_slow(self):
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, pad_token="<pad>")

        # Simple input
        s = "This is a simple input"
        s2 = ["This is a simple input looooooooong", "This is a simple input"]
        p = ("This is a simple input", "This is a pair")
        p2 = [
            ("This is a simple input loooooong", "This is a simple input"),
            ("This is a simple pair loooooong", "This is a simple pair"),
        ]

        pad_token_id = tokenizer.pad_token_id

        out_s = tokenizer(s, padding="max_length", max_length=30, return_tensors="np")
        out_s2 = tokenizer(s2, padding=True, truncate=True, return_tensors="np")
        out_p = tokenizer(*p, padding="max_length", max_length=60, return_tensors="np")
        out_p2 = tokenizer(p2, padding=True, truncate=True, return_tensors="np")

        # s
        # test single string max_length padding
        self.assertEqual(out_s["input_ids"].shape[-1], 30)
        self.assertTrue(pad_token_id in out_s["input_ids"])
        self.assertTrue(0 in out_s["attention_mask"])

        # s2
        # test automatic padding
        self.assertEqual(out_s2["input_ids"].shape[-1], 33)
        # long slice doesn't have padding
        self.assertFalse(pad_token_id in out_s2["input_ids"][0])
        self.assertFalse(0 in out_s2["attention_mask"][0])
        # short slice does have padding
        self.assertTrue(pad_token_id in out_s2["input_ids"][1])
        self.assertTrue(0 in out_s2["attention_mask"][1])

        # p
        # test single pair max_length padding
        self.assertEqual(out_p["input_ids"].shape[-1], 60)
        self.assertTrue(pad_token_id in out_p["input_ids"])
        self.assertTrue(0 in out_p["attention_mask"])

        # p2
        # test automatic padding pair
        self.assertEqual(out_p2["input_ids"].shape[-1], 52)
        # long slice pair doesn't have padding
        self.assertFalse(pad_token_id in out_p2["input_ids"][0])
        self.assertFalse(0 in out_p2["attention_mask"][0])
        # short slice pair does have padding
        self.assertTrue(pad_token_id in out_p2["input_ids"][1])
        self.assertTrue(0 in out_p2["attention_mask"][1])
    def test_add_bos_token_slow(self):
        bos_token = "$$$"
        tokenizer = CodeGenTokenizer.from_pretrained(self.tmpdirname, bos_token=bos_token, add_bos_token=True)

        s = "This is a simple input"
        s2 = ["This is a simple input 1", "This is a simple input 2"]

        bos_token_id = tokenizer.bos_token_id

        out_s = tokenizer(s)
        out_s2 = tokenizer(s2)

        self.assertEqual(out_s.input_ids[0], bos_token_id)
        self.assertTrue(all(o[0] == bos_token_id for o in out_s2.input_ids))

        decode_s = tokenizer.decode(out_s.input_ids)
        decode_s2 = tokenizer.batch_decode(out_s2.input_ids)

        self.assertEqual(decode_s.split()[0], bos_token)
        self.assertTrue(all(d.split()[0] == bos_token for d in decode_s2))

    @slow
    def test_truncation(self):
        tokenizer = CodeGenTokenizer.from_pretrained("Salesforce/codegen-350M-mono")

        text = "\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#"
        expected_truncated_text = "\nif len_a > len_b: result = a\nelse: result = b"

        input_ids = tokenizer.encode(text)
        truncate_before_pattern = ["^#", re.escape("<|endoftext|>"), "^'''", '^"""', "\n\n\n"]
        decoded_text = tokenizer.decode(input_ids, truncate_before_pattern=truncate_before_pattern)
        self.assertEqual(decoded_text, expected_truncated_text)

    def test_padding_different_model_input_name(self):
        pass
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
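
# A minimal usage sketch (added for illustration, assuming the
# "allenai/led-base-16384" checkpoint is available): the `_pad` override above
# extends `global_attention_mask` with -1 for positions added by padding.
if __name__ == "__main__":
    tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
    batch = tokenizer(["short text", "a somewhat longer piece of text"])
    # Put global attention on the first token of each sequence.
    batch["global_attention_mask"] = [[1] + [0] * (len(ids) - 1) for ids in batch["input_ids"]]
    padded = tokenizer.pad(batch, padding="longest")
    print(padded["global_attention_mask"])  # the shorter row now ends in -1s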
| 136 |
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations

from pprint import pformat
from typing import Generic, TypeVar

T = TypeVar("T")


class GraphAdjacencyList(Generic[T]):
    def __init__(self, directed: bool = True) -> None:
        self.adj_list: dict[T, list[T]] = {}  # dictionary of lists
        self.directed = directed

    def add_edge(self, source_vertex: T, destination_vertex: T) -> GraphAdjacencyList[T]:
if not self.directed: # For undirected graphs
# if both source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex].append(source_vertex)
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the source vertex
# as it's first adjacent vertex.
            elif destination_vertex in self.adj_list:
                self.adj_list[destination_vertex].append(source_vertex)
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as it's first adjacent vertex also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as it's first adjacent vertex.
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
            if source_vertex in self.adj_list and destination_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
            elif source_vertex in self.adj_list:
                self.adj_list[source_vertex].append(destination_vertex)
                self.adj_list[destination_vertex] = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
            elif destination_vertex in self.adj_list:
                self.adj_list[source_vertex] = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as it's first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
            else:
                self.adj_list[source_vertex] = [destination_vertex]
                self.adj_list[destination_vertex] = []

        return self

    def __repr__(self) -> str:
        return pformat(self.adj_list)
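
# A minimal usage sketch (added for illustration): build a small directed graph
# and print its adjacency list; `add_edge` returns `self`, so calls chain.
if __name__ == "__main__":
    graph = GraphAdjacencyList(directed=True)
    graph.add_edge(0, 1).add_edge(1, 2).add_edge(2, 0)
    print(graph)  # {0: [1], 1: [2], 2: [0]}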
| 136 | 1 |
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects
def should_reduce_batch_size(exception: Exception) -> bool:
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False
def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called."
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
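
# A minimal usage sketch (added for illustration): the decorated function must
# take the batch size as its first argument; on a recognized OOM error it is
# retried with half the batch size until one fits (or zero is reached).
if __name__ == "__main__":

    @find_executable_batch_size(starting_batch_size=64)
    def train(batch_size):
        print(f"trying batch_size={batch_size}")
        # ... build dataloaders / run a training step at this batch size ...

    train()  # called with no arguments; the decorator injects `batch_size`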
| 364 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 364 | 1 |
from __future__ import annotations

from queue import PriorityQueue
from typing import Any

import numpy as np


def pass_and_relaxation(
    graph: dict,
    v: str,
    visited_forward: set,
    visited_backward: set,
    cst_fwd: dict,
    cst_bwd: dict,
    queue: PriorityQueue,
    parent: dict,
    shortest_distance: float | int,
) -> float | int:
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt, np.inf)
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt))
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij(source: str, destination: str, graph_forward: dict, graph_backward: dict) -> int:
    shortest_path_distance = -1

    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward: PriorityQueue[Any] = PriorityQueue()
    queue_backward: PriorityQueue[Any] = PriorityQueue()

    shortest_distance = np.inf

    queue_forward.put((0, source))
    queue_backward.put((0, destination))

    if source == destination:
        return 0

    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd)

        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd)

        shortest_distance = pass_and_relaxation(
            graph_forward,
            v_fwd,
            visited_forward,
            visited_backward,
            cst_fwd,
            cst_bwd,
            queue_forward,
            parent_forward,
            shortest_distance,
        )

        shortest_distance = pass_and_relaxation(
            graph_backward,
            v_bwd,
            visited_backward,
            visited_forward,
            cst_bwd,
            cst_fwd,
            queue_backward,
            parent_backward,
            shortest_distance,
        )

        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break

    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
"B": [["C", 1]],
"C": [["D", 1]],
"D": [["F", 1]],
"E": [["B", 1], ["G", 2]],
"F": [],
"G": [["F", 1]],
}
graph_bwd = {
"B": [["E", 1]],
"C": [["B", 1]],
"D": [["C", 1]],
"F": [["D", 1], ["G", 1]],
"E": [[None, np.inf]],
"G": [["E", 2]],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
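
    # Demo (added for illustration): shortest E -> F distance in the sample
    # graphs above is 3 (E -> G costs 2, G -> F costs 1).
    print(bidirectional_dij("E", "F", graph_fwd, graph_bwd))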
| 574 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_conditional_detr": [
"CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
"ConditionalDetrConfig",
"ConditionalDetrOnnxConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
"CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConditionalDetrForObjectDetection",
"ConditionalDetrForSegmentation",
"ConditionalDetrModel",
"ConditionalDetrPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 574 | 1 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def a__ ( self : Dict ) -> Union[str, Any]:
"""simple docstring"""
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def a__ ( self : List[Any] ) -> torch.Tensor:
"""simple docstring"""
lowerCamelCase_ = torch.arange(self.height * self.width )
lowerCamelCase_ = torch.stack(
[
pixel_indices % self.width,
torch.div(A_ , self.width , rounding_mode='trunc' ),
] , axis=1 , )
return coords
@property
def a__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
lowerCamelCase_ , *lowerCamelCase_ = self.shape
lowerCamelCase_ = int(np.prod(A_ ) )
lowerCamelCase_ = self.get_image_coords()
lowerCamelCase_ = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
lowerCamelCase_ = self.get_camera_rays(A_ )
lowerCamelCase_ = rays.view(A_ , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def a__ ( self : int , A_ : torch.Tensor ) -> torch.Tensor:
"""simple docstring"""
lowerCamelCase_ , *lowerCamelCase_ , lowerCamelCase_ = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
lowerCamelCase_ = coords.view(A_ , -1 , 2 )
lowerCamelCase_ = self.resolution()
lowerCamelCase_ = self.fov()
lowerCamelCase_ = (flat.float() / (res - 1)) * 2 - 1
lowerCamelCase_ = fracs * torch.tan(fov / 2 )
lowerCamelCase_ = fracs.view(A_ , -1 , 2 )
lowerCamelCase_ = (
self.z.view(A_ , 1 , 3 )
+ self.x.view(A_ , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(A_ , 1 , 3 ) * fracs[:, :, 1:]
)
lowerCamelCase_ = directions / directions.norm(dim=-1 , keepdim=A_ )
lowerCamelCase_ = torch.stack(
[
torch.broadcast_to(self.origin.view(A_ , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(A_ , *A_ , 2 , 3 )
def a__ ( self : Any , A_ : int , A_ : int ) -> "DifferentiableProjectiveCamera":
"""simple docstring"""
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=A_ , height=A_ , x_fov=self.x_fov , y_fov=self.y_fov , )
def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
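
# A minimal usage sketch (added for illustration): twenty cameras panning around
# the origin at a 64x64 resolution; each ray is an (origin, direction) pair.
if __name__ == "__main__":
    cameras = create_pan_cameras(64)
    print(cameras.camera_rays.shape)  # torch.Size([1, 20 * 64 * 64, 2, 3])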
| 70 |
def solution(pence: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: 1 way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]


if __name__ == "__main__":
    assert solution(200) == 73682
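
    # Demo (added for illustration): 5 pence can be made 4 ways from {1, 2, 5}:
    # 5, 2+2+1, 2+1+1+1, 1+1+1+1+1.
    print(solution(5))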
| 112 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class DDIMPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()

        # make sure scheduler can always be converted to DDIM
        scheduler = DDIMScheduler.from_config(scheduler.config)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        use_clipped_model_output: Optional[bool] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        # Sample gaussian noise to begin loop
        if isinstance(self.unet.config.sample_size, int):
            image_shape = (
                batch_size,
                self.unet.config.in_channels,
                self.unet.config.sample_size,
                self.unet.config.sample_size,
            )
        else:
            image_shape = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)

        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(image, t).sample

            # 2. predict previous mean of image x_t-1 and add variance depending on eta
            # eta corresponds to η in paper and should be between [0, 1]
            # do x_t -> x_t-1
            image = self.scheduler.step(
                model_output, t, image, eta=eta, use_clipped_model_output=use_clipped_model_output, generator=generator
            ).prev_sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
| 717 |
from ...configuration_utils import PretrainedConfig


TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/tapas-base-finetuned-sqa": (
        "https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wtq": (
        "https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-wikisql-supervised": (
        "https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"
    ),
    "google/tapas-base-finetuned-tabfact": (
        "https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"
    ),
}


class TapasConfig(PretrainedConfig):
    model_type = "tapas"
    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        type_vocab_sizes=[3, 256, 256, 2, 256, 256, 10],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        positive_label_weight=10.0,
        num_aggregation_labels=0,
        aggregation_loss_weight=1.0,
        use_answer_as_supervision=None,
        answer_loss_importance=1.0,
        use_normalized_answer_loss=False,
        huber_loss_delta=None,
        temperature=1.0,
        aggregation_temperature=1.0,
        use_gumbel_for_cells=False,
        use_gumbel_for_aggregation=False,
        average_approximation_function="ratio",
        cell_selection_preference=None,
        answer_loss_cutoff=None,
        max_num_rows=64,
        max_num_columns=32,
        average_logits_per_cell=False,
        select_one_column=True,
        allow_empty_column_selection=False,
        init_cell_selection_weights_to_zero=False,
        reset_position_index_per_cell=True,
        disable_per_token_loss=False,
        aggregation_labels=None,
        no_aggregation_label_index=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        # BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_sizes = type_vocab_sizes
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        # Fine-tuning task hyperparameters
        self.positive_label_weight = positive_label_weight
        self.num_aggregation_labels = num_aggregation_labels
        self.aggregation_loss_weight = aggregation_loss_weight
        self.use_answer_as_supervision = use_answer_as_supervision
        self.answer_loss_importance = answer_loss_importance
        self.use_normalized_answer_loss = use_normalized_answer_loss
        self.huber_loss_delta = huber_loss_delta
        self.temperature = temperature
        self.aggregation_temperature = aggregation_temperature
        self.use_gumbel_for_cells = use_gumbel_for_cells
        self.use_gumbel_for_aggregation = use_gumbel_for_aggregation
        self.average_approximation_function = average_approximation_function
        self.cell_selection_preference = cell_selection_preference
        self.answer_loss_cutoff = answer_loss_cutoff
        self.max_num_rows = max_num_rows
        self.max_num_columns = max_num_columns
        self.average_logits_per_cell = average_logits_per_cell
        self.select_one_column = select_one_column
        self.allow_empty_column_selection = allow_empty_column_selection
        self.init_cell_selection_weights_to_zero = init_cell_selection_weights_to_zero
        self.reset_position_index_per_cell = reset_position_index_per_cell
        self.disable_per_token_loss = disable_per_token_loss

        # Aggregation hyperparameters
        self.aggregation_labels = aggregation_labels
        self.no_aggregation_label_index = no_aggregation_label_index

        if isinstance(self.aggregation_labels, dict):
            self.aggregation_labels = {int(k): v for k, v in aggregation_labels.items()}
| 383 | 0 |
"""simple docstring"""
import argparse
import struct
import unittest
class SHA256:
    def __init__(self, data: bytes) -> None:
        self.data = data

        # Initialize hash values
        self.hashes = [
            0x6A09E667,
            0xBB67AE85,
            0x3C6EF372,
            0xA54FF53A,
            0x510E527F,
            0x9B05688C,
            0x1F83D9AB,
            0x5BE0CD19,
        ]

        # Initialize round constants
        self.round_constants = [
            0x428A2F98, 0x71374491, 0xB5C0FBCF, 0xE9B5DBA5,
            0x3956C25B, 0x59F111F1, 0x923F82A4, 0xAB1C5ED5,
            0xD807AA98, 0x12835B01, 0x243185BE, 0x550C7DC3,
            0x72BE5D74, 0x80DEB1FE, 0x9BDC06A7, 0xC19BF174,
            0xE49B69C1, 0xEFBE4786, 0x0FC19DC6, 0x240CA1CC,
            0x2DE92C6F, 0x4A7484AA, 0x5CB0A9DC, 0x76F988DA,
            0x983E5152, 0xA831C66D, 0xB00327C8, 0xBF597FC7,
            0xC6E00BF3, 0xD5A79147, 0x06CA6351, 0x14292967,
            0x27B70A85, 0x2E1B2138, 0x4D2C6DFC, 0x53380D13,
            0x650A7354, 0x766A0ABB, 0x81C2C92E, 0x92722C85,
            0xA2BFE8A1, 0xA81A664B, 0xC24B8B70, 0xC76C51A3,
            0xD192E819, 0xD6990624, 0xF40E3585, 0x106AA070,
            0x19A4C116, 0x1E376C08, 0x2748774C, 0x34B0BCB5,
            0x391C0CB3, 0x4ED8AA4A, 0x5B9CCA4F, 0x682E6FF3,
            0x748F82EE, 0x78A5636F, 0x84C87814, 0x8CC70208,
            0x90BEFFFA, 0xA4506CEB, 0xBEF9A3F7, 0xC67178F2,
        ]
        self.preprocessed_data = self.preprocessing(self.data)
        self.final_hash()

    @staticmethod
    def preprocessing(data: bytes) -> bytes:
        padding = b"\x80" + (b"\x00" * (63 - (len(data) + 8) % 64))
        big_endian_integer = struct.pack(">Q", (len(data) * 8))
        return data + padding + big_endian_integer
    def final_hash(self) -> None:
        # Convert into blocks of 64 bytes
        self.blocks = [
            self.preprocessed_data[x : x + 64]
            for x in range(0, len(self.preprocessed_data), 64)
        ]

        for block in self.blocks:
            # Convert the given block into a list of 4 byte integers
            words = list(struct.unpack(">16L", block))
            # add 48 0-ed integers
            words += [0] * 48

            a, b, c, d, e, f, g, h = self.hashes

            for index in range(0, 64):
                if index > 15:
                    # modify the zero-ed indexes at the end of the array
                    s0 = (
                        self.ror(words[index - 15], 7)
                        ^ self.ror(words[index - 15], 18)
                        ^ (words[index - 15] >> 3)
                    )
                    s1 = (
                        self.ror(words[index - 2], 17)
                        ^ self.ror(words[index - 2], 19)
                        ^ (words[index - 2] >> 10)
                    )

                    words[index] = (
                        words[index - 16] + s0 + words[index - 7] + s1
                    ) % 0x100000000

                # Compression
                s1 = self.ror(e, 6) ^ self.ror(e, 11) ^ self.ror(e, 25)
                ch = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
                temp1 = (
                    h + s1 + ch + self.round_constants[index] + words[index]
                ) % 0x100000000
                s0 = self.ror(a, 2) ^ self.ror(a, 13) ^ self.ror(a, 22)
                maj = (a & b) ^ (a & c) ^ (b & c)
                temp2 = (s0 + maj) % 0x100000000

                h, g, f, e, d, c, b, a = (
                    g,
                    f,
                    e,
                    ((d + temp1) % 0x100000000),
                    c,
                    b,
                    a,
                    ((temp1 + temp2) % 0x100000000),
                )

            mutated_hash_values = [a, b, c, d, e, f, g, h]

            # Modify final values
            self.hashes = [
                ((element + mutated_hash_values[index]) % 0x100000000)
                for index, element in enumerate(self.hashes)
            ]

        self.hash = "".join([hex(value)[2:].zfill(8) for value in self.hashes])

    def ror(self, value: int, rotations: int) -> int:
        return 0xFFFFFFFF & (value << (32 - rotations)) | (value >> rotations)
class SHA256HashTest(unittest.TestCase):
    def test_match_hashes(self) -> None:
        import hashlib

        msg = bytes("Test String", "utf-8")
        self.assertEqual(SHA256(msg).hash, hashlib.sha256(msg).hexdigest())
def main() -> None:
    import doctest

    doctest.testmod()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-s",
        "--string",
        dest="input_string",
        default="Hello World!! Welcome to Cryptography",
        help="Hash the string",
    )
    parser.add_argument("-f", "--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string

    # hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA256(hash_input).hash)


if __name__ == "__main__":
    main()
| 532 |
"""simple docstring"""
def UpperCAmelCase__ ( SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
lowerCAmelCase = """"""
for ch in key:
if ch == " " or ch not in key_no_dups and ch.isalpha():
key_no_dups += ch
return key_no_dups
def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet
def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
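
# A minimal round-trip sketch (added for illustration; `demo` is not part of the
# original module): encipher then decipher with the keyword "Corresponds".
def demo() -> None:
    cipher_map = create_cipher_map("Corresponds")
    encoded = encipher("Hello World", cipher_map)
    assert decipher(encoded, cipher_map) == "HELLO WORLD"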
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 532 | 1 |
"""simple docstring"""
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
open = open  # noqa: we just need to have a builtin inside this module to test it properly
| 85 |
"""simple docstring"""
def lowercase__(A ) ->list[int]:
"""simple docstring"""
lowercase__ : List[str]= len(A )
for i in range(A ):
for j in range(i + 1 , A ):
if numbers[j] < numbers[i]:
lowercase__, lowercase__ : List[str]= numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
a : Dict = input("""Enter numbers separated by a comma:\n""").strip()
a : List[str] = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
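
    # Demo (added for illustration): exchange_sort mutates and returns the list.
    print(exchange_sort([5, 3, 1, 2]))  # [1, 2, 3, 5]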
| 85 | 1 |
"""simple docstring"""
import os
import pytest
from attr import dataclass
__lowerCamelCase = "us-east-1" # defaults region
@dataclass
class _snake_case :
'''simple docstring'''
UpperCamelCase__ =42
UpperCamelCase__ ="""arn:aws:iam::558105141721:role/sagemaker_execution_role"""
UpperCamelCase__ ={
"""task_name""": """mnli""",
"""per_device_train_batch_size""": 16,
"""per_device_eval_batch_size""": 16,
"""do_train""": True,
"""do_eval""": True,
"""do_predict""": True,
"""output_dir""": """/opt/ml/model""",
"""overwrite_output_dir""": True,
"""max_steps""": 500,
"""save_steps""": 5500,
}
UpperCamelCase__ ={**hyperparameters, """max_steps""": 1000}
@property
def snake_case_ ( self : Optional[int] ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
    def base_job_name(self) -> str:
        return f"{self.framework}-transfromers-test"
@property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"
@property
    def image_uri(self) -> str:
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class")
def sm_env(request):
    test_env = SageMakerTestEnvironment(framework=request.cls.framework)
| 608 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_groupvit": [
"GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"GroupViTConfig",
"GroupViTOnnxConfig",
"GroupViTTextConfig",
"GroupViTVisionConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
"GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"GroupViTModel",
"GroupViTPreTrainedModel",
"GroupViTTextModel",
"GroupViTVisionModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
"TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFGroupViTModel",
"TFGroupViTPreTrainedModel",
"TFGroupViTTextModel",
"TFGroupViTVisionModel",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 608 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
    "configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"""GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTBigCodeForSequenceClassification""",
"""GPTBigCodeForTokenClassification""",
"""GPTBigCodeForCausalLM""",
"""GPTBigCodeModel""",
"""GPTBigCodePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 146 |
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        hidden_dropout=0.0,
        attention_dropout=0.1,
        weight_tying=True,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels

    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels
    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        """simple docstring"""
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past['hidden_states'][0]
        output_from_past = model(
            next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )['hidden_states'][0]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
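    # Note: the check above exercises key/value caching. Feeding only the new tokens together
    # with `past_key_values` must reproduce the same hidden states as re-running the full
    # concatenated sequence, up to the 1e-3 tolerance asserted by torch.allclose.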
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {'feature-extraction': GPTNeoXJapaneseModel, 'text-generation': GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)
    def test_config(self):
        """simple docstring"""
        self.config_tester.run_common_tests()
    def test_model(self):
        """simple docstring"""
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)
    def test_model_as_decoder(self):
        """simple docstring"""
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)
    def test_model_as_decoder_with_default_input_mask(self):
        """simple docstring"""
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)
    def test_decoder_model_past_large_inputs(self):
        """simple docstring"""
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)
    def test_model_for_causal_lm(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
@slow
    def test_generation(self):
        """simple docstring"""
        model_name = 'abeja/gpt-neox-japanese-2.7b'
        prompts = ['データサイエンティストとは、', '100年後に必要とされる会社は、', 'フルリモートの環境で働くために必要なことは、', '国境の長いトンネルを抜けると', '美味しい日本食といえば、']
        EXPECTED_OUTPUTS = [
'''データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。''',
'''100年後に必要とされる会社は、「人」が中心の会社です。''',
'''フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。''',
'''国境の長いトンネルを抜けると、そこは雪国だった。''',
'''美味しい日本食といえば、やっぱりお寿司ですよね。''',
]
        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_name)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_name)
        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors='pt').input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 146 | 1 |
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 647 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22PipelineFastTests(PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    pipeline_class = KandinskyVaaPipeline
    params = [
        'image_embeds',
        'negative_image_embeds',
    ]
    batch_params = ['image_embeds', 'negative_image_embeds']
    required_optional_params = [
        'generator',
        'height',
        'width',
        'latents',
        'guidance_scale',
        'num_inference_steps',
        'return_dict',
        'guidance_scale',
        'num_images_per_prompt',
        'output_type',
        'return_dict',
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        """simple docstring"""
        return 32
    @property
    def time_input_dim(self):
        """simple docstring"""
        return 32
    @property
    def block_out_channels_0(self):
        """simple docstring"""
        return self.time_input_dim
    @property
    def time_embed_dim(self):
        """simple docstring"""
        return self.time_input_dim * 4
    @property
    def cross_attention_dim(self):
        """simple docstring"""
        return 100
    @property
    def dummy_unet(self):
        """simple docstring"""
        torch.manual_seed(0)
        model_kwargs = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
            '''block_out_channels''': (self.block_out_channels_0, self.block_out_channels_0 * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        model = UNetaDConditionModel(**model_kwargs)
return model
    @property
    def dummy_movq_kwargs(self):
        """simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
    @property
    def dummy_movq(self):
        """simple docstring"""
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        """simple docstring"""
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000, beta_schedule='linear', beta_start=0.00085, beta_end=0.012, clip_sample=False, set_alpha_to_one=False, steps_offset=1, prediction_type='epsilon', thresholding=False, )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        """simple docstring"""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device)
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''guidance_scale''': 4.0,
'''num_inference_steps''': 2,
'''output_type''': '''np''',
}
return inputs
    def test_kandinsky(self):
        """simple docstring"""
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device), return_dict=False, )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046])
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyV22PipelineIntegrationTests(unittest.TestCase):
'''simple docstring'''
    def tearDown(self):
        """simple docstring"""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_text2img(self):
        """simple docstring"""
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy')
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.float16)
        pipe_prior.to(torch_device)
        pipeline = KandinskyVaaPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-decoder', torch_dtype=torch.float16)
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)
        prompt = 'red cat, 4k photo'
        generator = torch.Generator(device='cuda').manual_seed(0)
        image_embeds, zero_image_embeds = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt='', ).to_tuple()
        generator = torch.Generator(device='cuda').manual_seed(0)
        output = pipeline(
            image_embeds=image_embeds, negative_image_embeds=zero_image_embeds, generator=generator, num_inference_steps=100, output_type='np', )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image, expected_image)
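    # Note: Kandinsky 2.2 is a two-stage system. The prior pipeline maps the text prompt to
    # CLIP image embeddings, and this decoder pipeline turns those embeddings into pixels via
    # the MoVQ decoder, which is why no raw text is ever passed to KandinskyVaaPipeline above.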
| 647 | 1 |
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        '''simple docstring'''
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()
        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
            self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))
        self.keep_order = keep_order
    def _compute_logit(self, hidden, weight, bias, proj):
        '''simple docstring'''
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        '''simple docstring'''
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError('Input and labels should have the same size in the batch dimension.')
        else:
            hidden = hidden.view(-1, hidden.size(-1))
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden
                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i
                if labels is not None:
                    if (hasattr(self, 'keep_order') and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)
        return out
    def log_prob(self, hidden):
        '''simple docstring'''
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)
            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
            return out
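# Usage sketch (the shapes and cutoffs below are illustrative, not from the original file):
# score a batch of decoder states against a 10k-token vocabulary split into a frequent head
# and two rarer tail clusters. Weights are freshly constructed and untrained, so this is a
# shape check only, not a meaningful loss.
if __name__ == "__main__":
    crit = ProjectedAdaptiveLogSoftmax(n_token=10_000, d_embed=64, d_proj=64, cutoffs=[2_000, 6_000], div_val=4)
    hidden = torch.randn(2, 8, 64)  # (batch, seq_len, d_proj)
    labels = torch.randint(0, 10_000, (2, 8))
    nll = crit(hidden, labels)  # per-token NLL, flattened to (batch * (seq_len - 1),)
    log_probs = crit.log_prob(hidden.view(-1, 64))  # (batch * seq_len, n_token)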
| 708 |
"""simple docstring"""
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
__SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
def load_and_quantize_model(
    model,
    bnb_quantization_config,
    weights_location=None,
    device_map=None,
    no_split_module_classes=None,
    max_memory=None,
    offload_folder=None,
    offload_state_dict=False,
):
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit
    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            'You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'
            ' make sure you have the latest version of `bitsandbytes` installed.')
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            'You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'
            'make sure you have the latest version of `bitsandbytes` installed.')
    modules_on_cpu = []
    # custom device map
    if isinstance(device_map, dict) and len(device_map.keys()) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model)
    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu)
    modules_to_not_convert = bnb_quantization_config.skip_modules
    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules)
    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit
    model_device = get_parameter_device(model)
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            'It is not recommended to quantize a loaded model. '
            'The model should be instantiated under the `init_empty_weights` context manager.')
        model = replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules):
                param.to(torch.float32)
                if param.dtype != torch.float32:
                    name = name.replace('.weight', '').replace('.bias', '')
                    param = getattr(model, name, None)
                    if param is not None:
                        param.to(torch.float32)
            elif torch.is_floating_point(param):
                param.to(dtype)
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device())
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device())
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.')
        logger.info(
            f'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
            'We move the model to cuda.')
        return model
    elif weights_location is None:
        raise RuntimeError(
            f'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''')
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model, bnb_quantization_config, modules_to_not_convert=modules_to_not_convert)
        device_map = get_quantized_model_device_map(
            model, bnb_quantization_config, device_map, max_memory=max_memory, no_split_module_classes=no_split_module_classes, )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values()) for x in ['cpu', 'disk'])
        load_checkpoint_in_model(
            model, weights_location, device_map, dtype=bnb_quantization_config.torch_dtype, offload_folder=offload_folder, offload_state_dict=offload_state_dict, keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules, offload_8bit_bnb=load_in_8bit and offload, )
        return dispatch_model(model, device_map=device_map, offload_dir=offload_folder)
def get_quantized_model_device_map(model, bnb_quantization_config, device_map=None, max_memory=None, no_split_module_classes=None):
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {'': torch.cuda.current_device()}
        else:
            raise RuntimeError('No GPU found. A GPU is needed for quantization.')
        logger.info('The device_map was not initialized.' 'Setting device_map to `{\'\':torch.cuda.current_device()}`.')
    if isinstance(device_map, str):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                'If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '
                '\'sequential\'.')
        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules)
            })
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules)
            })
        kwargs = {}
        kwargs['special_dtypes'] = special_dtypes
        kwargs['no_split_module_classes'] = no_split_module_classes
        kwargs['dtype'] = bnb_quantization_config.target_dtype
        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model, low_zero=(device_map == 'balanced_low_0'), max_memory=max_memory, **kwargs, )
        kwargs['max_memory'] = max_memory
        device_map = infer_auto_device_map(model, **kwargs)
    if isinstance(device_map, dict):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules
        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        '\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ')
                else:
                    logger.info(
                        'Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit')
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model, bnb_quantization_config, modules_to_not_convert, current_key_name)
    if not has_been_replaced:
        logger.warning(
            'You are loading your model in 8bit or 4bit but no linear modules were found in your model.'
            ' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'
            ' Please double check your model architecture, or submit an issue on github if you think this is'
            ' a bug.')
    return model
def _replace_with_bnb_layers(model, bnb_quantization_config, modules_to_not_convert=None, current_key_name=None):
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name)
        if isinstance(module, nn.Linear) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = '.'.join(current_key_name)
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features, module.out_features, module.bias is not None, has_fp16_weights=False, threshold=bnb_quantization_config.llm_int8_threshold, )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features, module.out_features, module.bias is not None, bnb_quantization_config.bnb_4bit_compute_dtype, compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant, quant_type=bnb_quantization_config.bnb_4bit_quant_type, )
                else:
                    raise ValueError('load_in_8bit and load_in_4bit can\'t be both False')
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False)
                setattr(model, name, bnb_module)
                has_been_replaced = True
        if len(list(module.children())) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module, bnb_quantization_config, modules_to_not_convert, current_key_name)
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1)
    return model, has_been_replaced
def get_keys_to_not_convert(model):
    # Create a copy of the model
    with init_empty_weights():
        tied_model = deepcopy(model)  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model)
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params, dict):
        tied_keys = sum(list(tied_params.values()), []) + list(tied_params.keys())
    else:
        tied_keys = sum(tied_params, [])
    has_tied_params = len(tied_keys) > 0
    # Check if it is a base model
    is_base_model = False
    if hasattr(model, 'base_model_prefix'):
        is_base_model = not hasattr(model, model.base_model_prefix)
    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []
    # otherwise they have an attached head
    list_modules = list(model.named_children())
    list_last_module = [list_modules[-1][0]]
    # add last module together with tied weights
    intersection = set(list_last_module) - set(tied_keys)
    list_untouched = list(set(tied_keys)) + list(intersection)
    # remove ".weight" from the keys
    names_to_remove = ['.weight', '.bias']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove, '')
        filtered_module_names.append(name)
    return filtered_module_names
def has_4bit_bnb_layers(model):
    for m in model.modules():
        if isinstance(m, bnb.nn.Linear4bit):
            return True
    return False
def get_parameter_device(parameter):
    return next(parameter.parameters()).device
def quantize_and_offload_8bit(model, param, param_name, new_dtype, offload_folder, offload_index, fp16_statistics):
    # if it is not quantized, we quantize and offload the quantized weights and the SCB stats
    if fp16_statistics is None:
        set_module_tensor_to_device(model, param_name, 0, dtype=new_dtype, value=param)
        tensor_name = param_name
        module = model
        if "." in tensor_name:
            splits = tensor_name.split('.')
            for split in splits[:-1]:
                new_module = getattr(module, split)
                if new_module is None:
                    raise ValueError(f'''{module} has no attribute {split}.''')
                module = new_module
            tensor_name = splits[-1]
        # offload weights
        module._parameters[tensor_name].requires_grad = False
        offload_weight(module._parameters[tensor_name], param_name, offload_folder, index=offload_index)
        if hasattr(module._parameters[tensor_name], 'SCB'):
            offload_weight(
                module._parameters[tensor_name].SCB, param_name.replace('weight', 'SCB'), offload_folder, index=offload_index, )
    else:
        offload_weight(param, param_name, offload_folder, index=offload_index)
        offload_weight(fp16_statistics, param_name.replace('weight', 'SCB'), offload_folder, index=offload_index)
    set_module_tensor_to_device(model, param_name, 'meta', dtype=new_dtype, value=torch.empty(*param.size()))
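# Usage sketch (assumes the public `accelerate` API; the model class and checkpoint path
# below are placeholders, not from this file): build the architecture on the meta device,
# then quantize it while the real weights stream in from disk.
#
# from accelerate import init_empty_weights
# from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model
# with init_empty_weights():
#     empty_model = MyModel(config)  # hypothetical model class and config
# bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
# quantized_model = load_and_quantize_model(
#     empty_model,
#     bnb_quantization_config=bnb_config,
#     weights_location='path/to/weights',  # placeholder checkpoint folder
#     device_map='auto',
# )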
| 395 | 0 |
import math
def is_prime(number: int) -> bool:
    '''simple docstring'''
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(ratio: float = 0.1) -> int:
    '''simple docstring'''
    j = 3
    primes = 3
    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
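# Worked example (Ulam-spiral corners, Project Euler 58): for the layer of side length
# j + 2, the three non-square diagonal corners are j*j + (j + 1), j*j + 2*(j + 1) and
# j*j + 3*(j + 1); the fourth corner (j + 2)**2 is a perfect square and never prime,
# which is exactly the range(j*j + j + 1, (j + 2)*(j + 2), j + 1) scanned above.
# For instance, solution(0.5) == 11, and the default solution(0.1) yields 26241.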
if __name__ == "__main__":
import doctest
doctest.testmod()
| 165 |
import qiskit
def quantum_entanglement(qubits: int = 2):
    '''simple docstring'''
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)
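# For an n-qubit GHZ state the only possible measurement outcomes are all zeros and all
# ones, so with 1000 shots the returned counts should look roughly like
# {'000': ~500, '111': ~500} for the 3-qubit call in __main__ below.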
if __name__ == "__main__":
print(f'''Total count for various states are: {quantum_entanglement(3)}''')
| 165 | 1 |
'''simple docstring'''
import unittest
from pathlib import Path
from shutil import copyfile
from transformers import SPIECE_UNDERLINE, is_sentencepiece_available
from transformers.models.speech_to_text import SpeechaTextTokenizer
from transformers.models.speech_to_text.tokenization_speech_to_text import VOCAB_FILES_NAMES, save_json
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_SP = get_tests_dir('''fixtures/test_sentencepiece.model''')
if is_sentencepiece_available():
import sentencepiece as sp
FR_CODE = 5
ES_CODE = 10
@require_sentencepiece
@require_tokenizers
class SpeechToTextTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechaTextTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        spm_model = sp.SentencePieceProcessor()
        spm_model.Load(SAMPLE_SP)
        vocab = ['<s>', '<pad>', '</s>', '<unk>']
        vocab += [spm_model.IdToPiece(id_) for id_ in range(len(spm_model))]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        save_dir = Path(self.tmpdirname)
        save_json(vocab_tokens, save_dir / VOCAB_FILES_NAMES['vocab_file'])
        if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists():
            copyfile(SAMPLE_SP, save_dir / VOCAB_FILES_NAMES['spm_file'])
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        '''simple docstring'''
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], 'j')
        self.assertEqual(len(vocab_keys), 1001)
    def test_vocab_size(self):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1001)
    def test_full_tokenizer(self):
        '''simple docstring'''
        tokenizer = SpeechaTextTokenizer.from_pretrained(self.tmpdirname)
        tokens = tokenizer.tokenize('This is a test')
        self.assertListEqual(tokens, ['▁This', '▁is', '▁a', '▁t', 'est'])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [289, 50, 14, 174, 386], )
        tokens = tokenizer.tokenize('I was born in 92000, and this is falsé.')
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [12, 25, 88, 59, 28, 23, 11, 4, 606, 351, 351, 351, 7, 16, 70, 50, 76, 84, 10, 4, 8])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'], )
@slow
    def test_tokenizer_integration(self):
'''simple docstring'''
__A : List[str] = {'input_ids': [[3791, 797, 31, 11, 64, 797, 31, 2429, 433, 12, 1176, 12, 20, 786, 915, 142, 2413, 240, 37, 3238, 797, 31, 11, 35, 93, 915, 142, 2413, 240, 37, 5540, 567, 1276, 93, 37, 610, 40, 62, 455, 657, 1042, 123, 780, 177, 37, 309, 241, 1298, 514, 20, 292, 2737, 114, 2469, 241, 85, 64, 302, 548, 528, 423, 4, 509, 406, 423, 37, 601, 4, 777, 302, 548, 528, 423, 284, 4, 3388, 511, 459, 4, 3555, 40, 321, 302, 705, 4, 3388, 511, 583, 326, 5, 5, 5, 62, 3310, 560, 177, 2680, 217, 1508, 32, 31, 853, 418, 64, 583, 511, 1605, 62, 35, 93, 560, 177, 2680, 217, 1508, 1521, 64, 583, 511, 519, 62, 20, 1515, 764, 20, 149, 261, 5625, 7972, 20, 5540, 567, 1276, 93, 3925, 1675, 11, 15, 802, 7972, 576, 217, 1508, 11, 35, 93, 1253, 2441, 15, 289, 652, 31, 416, 321, 3842, 115, 40, 911, 8, 476, 619, 4, 380, 142, 423, 335, 240, 35, 93, 264, 8, 11, 335, 569, 420, 163, 5, 2], [260, 548, 528, 423, 20, 451, 20, 2681, 1153, 3434, 20, 5540, 37, 567, 126, 1253, 2441, 3376, 449, 210, 431, 1563, 177, 767, 5540, 11, 1203, 472, 11, 2953, 685, 285, 364, 706, 1153, 20, 6799, 20, 2869, 20, 4464, 126, 40, 2429, 20, 1040, 866, 2664, 418, 20, 318, 20, 1726, 186, 20, 265, 522, 35, 93, 2191, 4634, 20, 1040, 12, 6799, 15, 228, 2356, 142, 31, 11, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2575, 2666, 684, 1582, 1176, 12, 627, 149, 619, 20, 4902, 563, 11, 20, 149, 261, 3420, 2356, 174, 142, 4714, 131, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=__A, model_name='facebook/s2t-small-mustc-en-de-st', revision='a14f04cf0776c02f62a8cb800cf7909e15ea23ad', )
@require_sentencepiece
class SpeechToTextTokenizerMultilinguialTest(unittest.TestCase):
    checkpoint_name = '''valhalla/s2t_mustc_multilinguial_medium'''
    french_text = '''C\'est trop cool'''
    spanish_text = '''Esto es genial'''
@classmethod
    def setUpClass(cls):
        '''simple docstring'''
        cls.tokenizer: SpeechaTextTokenizer = SpeechaTextTokenizer.from_pretrained(cls.checkpoint_name)
return cls
    def check_language_codes(self):
        '''simple docstring'''
        self.assertEqual(self.tokenizer.lang_code_to_id['pt'], 4)
        self.assertEqual(self.tokenizer.lang_code_to_id['ru'], 6)
        self.assertEqual(self.tokenizer.lang_code_to_id['it'], 9)
        self.assertEqual(self.tokenizer.lang_code_to_id['de'], 11)
    def test_vocab_size(self):
        '''simple docstring'''
        self.assertEqual(self.tokenizer.vocab_size, 10000)
    def test_tokenizer_decode_ignores_language_codes(self):
        '''simple docstring'''
        self.assertIn(ES_CODE, self.tokenizer.all_special_ids)
        generated_ids = [ES_CODE, 4, 1601, 47, 7647, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_spanish = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_spanish)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_adds_special_tokens(self):
        '''simple docstring'''
        self.tokenizer.tgt_lang = 'fr'
        encoded = self.tokenizer(self.french_text).input_ids
        self.assertEqual(encoded[0], FR_CODE)
        self.assertEqual(encoded[-1], self.tokenizer.eos_token_id)
    def test_tgt_lang_setter(self):
        '''simple docstring'''
        self.tokenizer.tgt_lang = 'fr'
        self.assertListEqual(self.tokenizer.prefix_tokens, [FR_CODE])
        self.tokenizer.tgt_lang = 'es'
        self.assertListEqual(self.tokenizer.prefix_tokens, [ES_CODE])
| 338 |
'''simple docstring'''
ERROR_MSG = '''Input must be a string of 8 numbers plus letter'''
LOOKUP_LETTERS = '''TRWAGMYFPDXBNJZSQVHLCKE'''
def is_spain_national_id(spanish_id: str) -> bool:
    if not isinstance(spanish_id, str):
        msg = f'Expected string as input, found {type(spanish_id).__name__}'
        raise TypeError(msg)
    spanish_id_clean = spanish_id.replace('-', '').upper()
    if len(spanish_id_clean) != 9:
        raise ValueError(ERROR_MSG)
    try:
        number = int(spanish_id_clean[0:8])
        letter = spanish_id_clean[8]
    except ValueError as ex:
        raise ValueError(ERROR_MSG) from ex
    if letter.isdigit():
        raise ValueError(ERROR_MSG)
    return letter == LOOKUP_LETTERS[number % 23]
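# Worked example: the check digit is LOOKUP_LETTERS[number % 23]. Since
# 12345678 % 23 == 14 and LOOKUP_LETTERS[14] == 'Z',
# is_spain_national_id("12345678Z") returns True while
# is_spain_national_id("12345678A") returns False.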
if __name__ == "__main__":
import doctest
doctest.testmod() | 338 | 1 |
def a__ ( A_, A_ = " " ):
'''simple docstring'''
__magic_name__ = []
__magic_name__ = 0
for index, char in enumerate(UpperCamelCase__ ):
if char == separator:
split_words.append(string[last_index:index] )
__magic_name__ = index + 1
elif index + 1 == len(UpperCamelCase__ ):
split_words.append(string[last_index : index + 1] )
return split_words
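# Usage sketch: split("apple#banana#cherry#orange", separator="#")
# -> ['apple', 'banana', 'cherry', 'orange']. Note the trailing-separator behaviour:
# a string ending in the separator drops the final empty field, unlike str.split.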
if __name__ == "__main__":
from doctest import testmod
testmod()
| 529 |
"""simple docstring"""
def reverse_words(input_str: str) -> str:
    return " ".join(input_str.split()[::-1])
if __name__ == "__main__":
import doctest
doctest.testmod()
| 506 | 0 |
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece.model')
SAMPLE_BPE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_bpe.model')
FRAMEWORK = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        '''simple docstring'''
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        '''simple docstring'''
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        '''simple docstring'''
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<s>NOTUSED')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], '<mask>')
        self.assertEqual(len(vocab_keys), 1004)
    def test_vocab_size(self):
'''simple docstring'''
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
    def test_rust_and_python_bpe_tokenizers(self):
        '''simple docstring'''
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)
        sequence = 'I was born in 92000, and this is falsé.'
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
    def test_rust_and_python_full_tokenizers(self):
        '''simple docstring'''
        if not self.test_rust_tokenizer:
            return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)
        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
@slow
    def test_tokenizer_integration(self):
'''simple docstring'''
UpperCamelCase = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
        sequences = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCamelCase, model_name='camembert-base', revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf', sequences=sequences, )
| 706 |
'''simple docstring'''
from typing import List, Union
import numpy as np
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, logging
from .base import PIPELINE_INIT_ARGS, ArgumentHandler, ChunkPipeline
lowerCAmelCase : Any = logging.get_logger(__name__)
class ZeroShotClassificationArgumentHandler(ArgumentHandler):
    def _parse_labels(self, labels):
        '''simple docstring'''
        if isinstance(labels, str):
            labels = [label.strip() for label in labels.split(',') if label.strip()]
        return labels
    def __call__(self, sequences, labels, hypothesis_template):
        '''simple docstring'''
        if len(labels) == 0 or len(sequences) == 0:
            raise ValueError('You must include at least one label and at least one sequence.')
        if hypothesis_template.format(labels[0]) == hypothesis_template:
            raise ValueError(
                (
                    'The provided hypothesis_template "{}" was not able to be formatted with the target labels. '
                    'Make sure the passed template includes formatting syntax such as {{}} where the label should go.'
                ).format(hypothesis_template))
        if isinstance(sequences, str):
            sequences = [sequences]
        sequence_pairs = []
        for sequence in sequences:
            sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels])
        return sequence_pairs, sequences
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotClassificationPipeline(ChunkPipeline):
    def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs):
        '''simple docstring'''
        self._args_parser = args_parser
        super().__init__(*args, **kwargs)
        if self.entailment_id == -1:
            logger.warning(
                'Failed to determine \'entailment\' label id from the label2id mapping in the model config. Setting to '
                '-1. Define a descriptive label2id mapping in the model config to ensure correct outputs.')
    @property
    def entailment_id(self):
        '''simple docstring'''
        for label, ind in self.model.config.label2id.items():
            if label.lower().startswith('entail'):
                return ind
        return -1
    def _parse_and_tokenize(self, sequence_pairs, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs):
        '''simple docstring'''
        return_tensors = self.framework
        if self.tokenizer.pad_token is None:
            # Override for tokenizers not supporting padding
            logger.error(
                'Tokenizer was not supporting padding necessary for zero-shot, attempting to use '
                ' `pad_token=eos_token`')
            self.tokenizer.pad_token = self.tokenizer.eos_token
        try:
            inputs = self.tokenizer(
                sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=truncation, )
        except Exception as e:
            if "too short" in str(e):
                # tokenizers might yell that we want to truncate
                # to a value that is not even reached by the input.
                # In that case we don't want to truncate.
                # It seems there's not a really better way to catch that
                # exception.
                inputs = self.tokenizer(
                    sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=return_tensors, padding=padding, truncation=TruncationStrategy.DO_NOT_TRUNCATE, )
            else:
                raise e
        return inputs
    def _sanitize_parameters(self, **kwargs):
        '''simple docstring'''
        if kwargs.get('multi_class', None) is not None:
            kwargs['multi_label'] = kwargs['multi_class']
            logger.warning(
                'The `multi_class` argument has been deprecated and renamed to `multi_label`. '
                '`multi_class` will be removed in a future version of Transformers.')
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = self._args_parser._parse_labels(kwargs['candidate_labels'])
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']
        postprocess_params = {}
        if "multi_label" in kwargs:
            postprocess_params['multi_label'] = kwargs['multi_label']
        return preprocess_params, {}, postprocess_params
    def __call__(self, sequences, *args, **kwargs):
        '''simple docstring'''
        if len(args) == 0:
            pass
        elif len(args) == 1 and "candidate_labels" not in kwargs:
            kwargs['candidate_labels'] = args[0]
        else:
            raise ValueError(f'''Unable to understand extra arguments {args}''')
        return super().__call__(sequences, **kwargs)
def UpperCAmelCase_ ( self , A_ , A_=None , A_="This example is {}." )-> List[str]:
'''simple docstring'''
UpperCamelCase , UpperCamelCase = self._args_parser(A_ , A_ , A_ )
for i, (candidate_label, sequence_pair) in enumerate(zip(A_ , A_ ) ):
UpperCamelCase = self._parse_and_tokenize([sequence_pair] )
yield {
"candidate_label": candidate_label,
"sequence": sequences[0],
"is_last": i == len(A_ ) - 1,
**model_input,
}
    def _forward(self, inputs):
        '''simple docstring'''
        candidate_label = inputs['candidate_label']
        sequence = inputs['sequence']
        model_inputs = {k: inputs[k] for k in self.tokenizer.model_input_names}
        outputs = self.model(**model_inputs)
        model_outputs = {
            'candidate_label': candidate_label,
            'sequence': sequence,
            'is_last': inputs['is_last'],
            **outputs,
        }
        return model_outputs
def UpperCAmelCase_ ( self , A_ , A_=False )-> List[str]:
'''simple docstring'''
UpperCamelCase = [outputs['candidate_label'] for outputs in model_outputs]
UpperCamelCase = [outputs['sequence'] for outputs in model_outputs]
UpperCamelCase = np.concatenate([output['logits'].numpy() for output in model_outputs] )
UpperCamelCase = logits.shape[0]
UpperCamelCase = len(A_ )
UpperCamelCase = N // n
UpperCamelCase = logits.reshape((num_sequences, n, -1) )
if multi_label or len(A_ ) == 1:
# softmax over the entailment vs. contradiction dim for each label independently
UpperCamelCase = self.entailment_id
UpperCamelCase = -1 if entailment_id == 0 else 0
UpperCamelCase = reshaped_outputs[..., [contradiction_id, entailment_id]]
UpperCamelCase = np.exp(A_ ) / np.exp(A_ ).sum(-1 , keepdims=A_ )
UpperCamelCase = scores[..., 1]
else:
# softmax the "entailment" logits over all candidate labels
UpperCamelCase = reshaped_outputs[..., self.entailment_id]
UpperCamelCase = np.exp(A_ ) / np.exp(A_ ).sum(-1 , keepdims=A_ )
UpperCamelCase = list(reversed(scores[0].argsort() ) )
return {
"sequence": sequences[0],
"labels": [candidate_labels[i] for i in top_inds],
"scores": scores[0, top_inds].tolist(),
}
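# Illustrative sketch (made-up logits, not produced by the pipeline above): the
# multi-label branch softmaxes each candidate label's [contradiction, entailment]
# logits independently and keeps the entailment probability as that label's score.
import numpy as np

example_logits = np.array([[[2.0, 0.1, -1.0],    # candidate label 1
                            [-1.5, 0.0, 2.5]]])  # candidate label 2
contradiction_id, entailment_id = 0, 2           # assumed NLI label layout
pair = example_logits[..., [contradiction_id, entailment_id]]
scores = np.exp(pair) / np.exp(pair).sum(-1, keepdims=True)
print(scores[..., 1])  # per-label P(entailment): ~[[0.047, 0.982]]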
| 432 | 0 |
'''simple docstring'''
import numpy as np
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
    return 1 / (1 + np.exp(-SCREAMING_SNAKE_CASE__ ))
if __name__ == "__main__":
import doctest
doctest.testmod()
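    # Quick check (illustrative values): the logistic above maps 0 to 0.5 and
    # saturates toward 0/1 for large-magnitude inputs.
    print(snake_case_(np.array([-4.0, 0.0, 4.0])))  # ~[0.018 0.5   0.982]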
| 533 |
'''simple docstring'''
import argparse
import datetime
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : Optional[int] = {
"""0""": """Sunday""",
"""1""": """Monday""",
"""2""": """Tuesday""",
"""3""": """Wednesday""",
"""4""": """Thursday""",
"""5""": """Friday""",
"""6""": """Saturday""",
}
_SCREAMING_SNAKE_CASE : Optional[int] = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
# Validate
if not 0 < len(SCREAMING_SNAKE_CASE__ ) < 11:
raise ValueError("""Must be 10 characters long""" )
# Get month
_SCREAMING_SNAKE_CASE : int = int(date_input[0] + date_input[1] )
# Validate
if not 0 < m < 13:
raise ValueError("""Month must be between 1 - 12""" )
_SCREAMING_SNAKE_CASE : str = date_input[2]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("""Date separator must be '-' or '/'""" )
# Get day
_SCREAMING_SNAKE_CASE : int = int(date_input[3] + date_input[4] )
# Validate
if not 0 < d < 32:
raise ValueError("""Date must be between 1 - 31""" )
# Get second separator
_SCREAMING_SNAKE_CASE : str = date_input[5]
# Validate
if sep_a not in ["-", "/"]:
raise ValueError("""Date separator must be '-' or '/'""" )
# Get year
_SCREAMING_SNAKE_CASE : int = int(date_input[6] + date_input[7] + date_input[8] + date_input[9] )
# Arbitrary year range
if not 45 < y < 8500:
raise ValueError(
"""Year out of range. There has to be some sort of limit...right?""" )
# Get datetime obj for validation
_SCREAMING_SNAKE_CASE : Optional[int] = datetime.date(int(SCREAMING_SNAKE_CASE__ ) , int(SCREAMING_SNAKE_CASE__ ) , int(SCREAMING_SNAKE_CASE__ ) )
# Start math
if m <= 2:
_SCREAMING_SNAKE_CASE : Union[str, Any] = y - 1
_SCREAMING_SNAKE_CASE : Optional[int] = m + 12
# maths var
_SCREAMING_SNAKE_CASE : int = int(str(SCREAMING_SNAKE_CASE__ )[:2] )
_SCREAMING_SNAKE_CASE : int = int(str(SCREAMING_SNAKE_CASE__ )[2:] )
_SCREAMING_SNAKE_CASE : int = int(2.6 * m - 5.3_9 )
_SCREAMING_SNAKE_CASE : int = int(c / 4 )
_SCREAMING_SNAKE_CASE : int = int(k / 4 )
_SCREAMING_SNAKE_CASE : int = int(d + k )
_SCREAMING_SNAKE_CASE : int = int(t + u + v + x )
_SCREAMING_SNAKE_CASE : int = int(z - (2 * c) )
_SCREAMING_SNAKE_CASE : int = round(w % 7 )
# End math
# Validate math
if f != convert_datetime_days[dt_ck.weekday()]:
raise AssertionError("""The date was evaluated incorrectly. Contact developer.""" )
# Response
_SCREAMING_SNAKE_CASE : str = f"""Your date {date_input}, is a {days[str(SCREAMING_SNAKE_CASE__ )]}!"""
return response
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase_ : Tuple = argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
UpperCAmelCase_ : List[Any] = parser.parse_args()
    snake_case_(args.date_input)
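# Standalone sketch of the same congruence (variable roles assumed, since the
# assignment targets above are name-mangled): 06/24/2023 comes out as Saturday,
# matching datetime.
def _zeller_check(m: int, d: int, y: int) -> int:
    if m <= 2:
        y, m = y - 1, m + 12
    c, k = int(str(y)[:2]), int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u, v = int(c / 4), int(k / 4)
    return (t + u + v + d + k - 2 * c) % 7  # 0 = Sunday ... 6 = Saturday

assert _zeller_check(6, 24, 2023) == datetime.date(2023, 6, 24).isoweekday() % 7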
| 533 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
A_ : int =logging.get_logger(__name__)
A_ : List[str] ='''▁'''
A_ : Any ={'''vocab_file''': '''sentencepiece.bpe.model'''}
A_ : List[str] ={
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
A_ : List[Any] ={
'''facebook/xglm-564M''': 20_48,
}
class __UpperCAmelCase ( __a ):
__A : Optional[int] = VOCAB_FILES_NAMES
__A : Any = PRETRAINED_VOCAB_FILES_MAP
__A : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Dict = ['input_ids', 'attention_mask']
def __init__( self , _lowerCamelCase , _lowerCamelCase="<s>" , _lowerCamelCase="</s>" , _lowerCamelCase="</s>" , _lowerCamelCase="<s>" , _lowerCamelCase="<unk>" , _lowerCamelCase="<pad>" , _lowerCamelCase = None , **_lowerCamelCase , ):
lowerCAmelCase_ = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
lowerCAmelCase_ = 7
lowerCAmelCase_ = [F'''<madeupword{i}>''' for i in range(self.num_madeup_words )]
lowerCAmelCase_ = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
lowerCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowerCamelCase ) )
lowerCAmelCase_ = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCAmelCase_ = 1
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCAmelCase_ = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3}
lowerCAmelCase_ = len(self.sp_model )
lowerCAmelCase_ = {F'''<madeupword{i}>''': sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(_lowerCamelCase )
lowerCAmelCase_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self ):
lowerCAmelCase_ = self.__dict__.copy()
lowerCAmelCase_ = None
lowerCAmelCase_ = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , _lowerCamelCase ):
lowerCAmelCase_ = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCAmelCase_ = {}
lowerCAmelCase_ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ):
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
lowerCAmelCase_ = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCamelCase ))
return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase ))
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ):
lowerCAmelCase_ = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def UpperCAmelCase_ ( self ):
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def UpperCAmelCase_ ( self ):
lowerCAmelCase_ = {self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def UpperCAmelCase_ ( self , _lowerCamelCase ):
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def UpperCAmelCase_ ( self , _lowerCamelCase ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCAmelCase_ = self.sp_model.PieceToId(_lowerCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def UpperCAmelCase_ ( self , _lowerCamelCase ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def UpperCAmelCase_ ( self , _lowerCamelCase ):
lowerCAmelCase_ = ''''''.join(_lowerCamelCase ).replace(_lowerCamelCase , ''' ''' ).strip()
return out_string
def UpperCAmelCase_ ( self , _lowerCamelCase , _lowerCamelCase = None ):
if not os.path.isdir(_lowerCamelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase_ = os.path.join(
_lowerCamelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase , '''wb''' ) as fi:
lowerCAmelCase_ = self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
return (out_vocab_file,)
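# Illustrative sketch (ids taken from the alignment comment above): with a
# fairseq offset of 1, a SentencePiece piece id maps to piece_id + offset, the
# four control tokens keep fixed ids, and piece id 0 falls back to <unk>.
_fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
_fairseq_offset = 1

def _token_id(token, spm_id):
    if token in _fairseq_tokens_to_ids:
        return _fairseq_tokens_to_ids[token]
    return spm_id + _fairseq_offset if spm_id else _fairseq_tokens_to_ids["<unk>"]

print(_token_id(",", 3))  # -> 4, matching the fairseq column in the comment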
 | 606 |
'''simple docstring'''
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def snake_case_ ( __snake_case : Tuple) -> str:
lowerCAmelCase_ = os.path.join(args.tf_model_dir , '''parameters.json''')
lowerCAmelCase_ = json.loads(open(__snake_case).read())
if not params:
raise ValueError(
F'''It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.''')
if not args.output.endswith('''.pt'''):
lowerCAmelCase_ = args.output + '''.pt'''
lowerCAmelCase_ = OrderedDict()
with tf.device('''/CPU:0'''):
lowerCAmelCase_ = tf.train.load_checkpoint(args.tf_model_dir)
lowerCAmelCase_ = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
lowerCAmelCase_ = reader.get_tensor(__snake_case).astype(np.floataa)
if key_name.endswith('''/adam_m''') or key_name.endswith('''/adam_v'''):
continue
if key_name.startswith('''pasts/'''):
if key_name.startswith('''pasts/mlp'''):
lowerCAmelCase_ = int(key_name[9])
elif key_name.startswith('''pasts/out'''):
lowerCAmelCase_ = 8
            lowerCAmelCase_ = '''model.sqout.%d.weight''' % (player * 2) # feeds an nn.Sequential with Tanh, so two entries at a time
lowerCAmelCase_ = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name.startswith('''model/moe'''):
lowerCAmelCase_ = int(key_name[9:].split('''/''')[0])
if key_name.endswith('''/switch_gating/kernel'''):
lowerCAmelCase_ = '''model.blocks.%d.feed_forward.mlp.router.classifier.weight''' % player
lowerCAmelCase_ = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name.endswith('''/softmlp/kernel'''):
lowerCAmelCase_ = '''model.blocks.%d.feed_forward.soft_bypass_mlp.weight''' % player
lowerCAmelCase_ = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name.endswith('''/wo/kernel''') or key_name.endswith('''/wi/kernel'''):
lowerCAmelCase_ = key_name[-9:-7]
for i in range(16):
lowerCAmelCase_ = '''model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight''' % (player, i, nlayer)
lowerCAmelCase_ = (
vnp[i].transpose([1, 0]).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name.startswith('''model/mlp'''):
lowerCAmelCase_ = int(key_name[9:].split('''/''')[0])
if key_name.endswith('''/p1/kernel'''):
lowerCAmelCase_ = '''model.blocks.%d.feed_forward.mlp.wi.weight''' % player
lowerCAmelCase_ = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name.endswith('''/p1/bias'''):
lowerCAmelCase_ = '''model.blocks.%d.feed_forward.mlp.wi.bias''' % player
lowerCAmelCase_ = vnp.copy() # same because it is one dimensional
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name.endswith('''/p2/kernel'''):
lowerCAmelCase_ = '''model.blocks.%d.feed_forward.mlp.wo.weight''' % player
lowerCAmelCase_ = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name.endswith('''/p2/bias'''):
lowerCAmelCase_ = '''model.blocks.%d.feed_forward.mlp.wo.bias''' % player
lowerCAmelCase_ = vnp.copy() # same because it is one dimensional
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name.startswith('''model/ln'''):
lowerCAmelCase_ = int(key_name[8:].split('''/''')[0])
if key_name.endswith('''/b'''):
lowerCAmelCase_ = '''model.blocks.%d.feed_forward.norm.bias''' % player
lowerCAmelCase_ = vnp.copy() # same because it is one dimensional
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name.endswith('''/g'''):
lowerCAmelCase_ = '''model.blocks.%d.feed_forward.norm.weight''' % player
lowerCAmelCase_ = vnp.copy() # same because it is one dimensional
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name.startswith('''model/att'''):
lowerCAmelCase_ = int(key_name[9:].split('''/''')[0])
if key_name.endswith('''/qkv/kernel'''):
lowerCAmelCase_ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
lowerCAmelCase_ = state[:, 0, :, :]
lowerCAmelCase_ = state[:, 1, :, :]
lowerCAmelCase_ = state[:, 2, :, :]
lowerCAmelCase_ = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]])
.transpose([1, 0])
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]])
.transpose([1, 0])
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]])
.transpose([1, 0])
.copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ = '''model.blocks.%d.self_attn.self_attn.q_proj.weight''' % player
lowerCAmelCase_ = torch.tensor(__snake_case)
lowerCAmelCase_ = '''model.blocks.%d.self_attn.self_attn.k_proj.weight''' % player
lowerCAmelCase_ = torch.tensor(__snake_case)
lowerCAmelCase_ = '''model.blocks.%d.self_attn.self_attn.v_proj.weight''' % player
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name.endswith('''/o/kernel'''):
lowerCAmelCase_ = '''model.blocks.%d.self_attn.self_attn.out_proj.weight''' % player
lowerCAmelCase_ = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]]).transpose([1, 0]).copy()
) # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name.startswith('''model/an'''):
lowerCAmelCase_ = int(key_name[8:].split('''/''')[0])
if key_name.endswith('''/b'''):
lowerCAmelCase_ = '''model.blocks.%d.self_attn.norm.bias''' % player
lowerCAmelCase_ = vnp.copy() # same because it is one dimensional
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name.endswith('''/g'''):
lowerCAmelCase_ = '''model.blocks.%d.self_attn.norm.weight''' % player
lowerCAmelCase_ = vnp.copy() # same because it is one dimensional
lowerCAmelCase_ = torch.tensor(__snake_case)
elif (
key_name.startswith('''model/wte''')
or key_name.startswith('''model/wpe''')
or key_name.startswith('''model/ete''')
):
lowerCAmelCase_ = {'''wte''': '''embed_tokens''', '''wpe''': '''position_embeddings''', '''ete''': '''extra_position_embeddings'''}[
key_name[-3:]
]
lowerCAmelCase_ = '''model.%s.weight''' % nlayer
lowerCAmelCase_ = vnp.copy() # same in embedded
lowerCAmelCase_ = torch.tensor(__snake_case)
if key_name.startswith('''model/wte'''):
lowerCAmelCase_ = '''lm_head.weight'''
lowerCAmelCase_ = vnp.copy() # same in embedded
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name.startswith('''model/wob'''):
lowerCAmelCase_ = '''final_logits_bias'''
lowerCAmelCase_ = vnp.copy() # same in embedded
lowerCAmelCase_ = state.reshape((1, -1))
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name == "model/dense/kernel":
lowerCAmelCase_ = '''model.last_project.weight'''
lowerCAmelCase_ = vnp.transpose([1, 0]).copy() # Mesh-Tensorflow is a diagonal matrix
lowerCAmelCase_ = torch.tensor(__snake_case)
elif key_name == "model/dense_1/bias":
lowerCAmelCase_ = '''model.last_project.bias'''
lowerCAmelCase_ = vnp.copy() # same because it is one dimensional
lowerCAmelCase_ = torch.tensor(__snake_case)
torch.save(__snake_case , args.output)
if __name__ == "__main__":
A_ : Optional[Any] =argparse.ArgumentParser(
description='''model converter.''', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('''--tf_model_dir''', metavar='''PATH''', type=str, required=True, help='''import model''')
parser.add_argument('''--output''', metavar='''PATH''', type=str, required=True, help='''output model''')
A_ : Union[str, Any] =parser.parse_args()
convert_tf_gptsan_to_pt(args)
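# Illustrative sketch (toy shapes): the repeated `transpose([1, 0]).copy()`
# pattern above converts TF kernels stored as (in_features, out_features) into
# torch Linear weights of shape (out_features, in_features).
_tf_kernel = np.zeros((768, 3072), dtype=np.float32)
_pt_weight = torch.tensor(_tf_kernel.transpose([1, 0]).copy())
print(_pt_weight.shape)  # torch.Size([3072, 768])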
| 606 | 1 |
'''simple docstring'''
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class A__ :
def __init__( self , UpperCamelCase__=2 , UpperCamelCase__=3 , UpperCamelCase__=64 , UpperCamelCase__=None ) -> Tuple:
'''simple docstring'''
A_ = np.random.default_rng(UpperCamelCase__ )
A_ = length
A_ = rng.normal(size=(length,) ).astype(np.floataa )
A_ = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self ) -> Dict:
'''simple docstring'''
return self.length
def __getitem__( self , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
return {"x": self.x[i], "y": self.y[i]}
class A__ ( torch.nn.Module ):
def __init__( self , UpperCamelCase__=0 , UpperCamelCase__=0 , UpperCamelCase__=False ) -> str:
'''simple docstring'''
super().__init__()
A_ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
A_ = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
A_ = True
def snake_case_ ( self , UpperCamelCase__=None ) -> Dict:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
A_ = False
return x * self.a[0] + self.b[0]
class A__ ( torch.nn.Module ):
def __init__( self , UpperCamelCase__=0 , UpperCamelCase__=0 , UpperCamelCase__=False ) -> List[Any]:
'''simple docstring'''
super().__init__()
A_ = torch.nn.Parameter(torch.tensor(UpperCamelCase__ ).float() )
A_ = torch.nn.Parameter(torch.tensor(UpperCamelCase__ ).float() )
A_ = True
def snake_case_ ( self , UpperCamelCase__=None ) -> Union[str, Any]:
'''simple docstring'''
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
A_ = False
return x * self.a + self.b
def UpperCAmelCase__ ( UpperCAmelCase__, UpperCAmelCase__ = 16 ) -> Any:
from datasets import load_dataset
from transformers import AutoTokenizer
A_ = AutoTokenizer.from_pretrained("""bert-base-cased""" )
A_ = {"""train""": """tests/test_samples/MRPC/train.csv""", """validation""": """tests/test_samples/MRPC/dev.csv"""}
A_ = load_dataset("""csv""", data_files=UpperCAmelCase__ )
A_ = datasets["""train"""].unique("""label""" )
A_ = {v: i for i, v in enumerate(UpperCAmelCase__ )}
def tokenize_function(UpperCAmelCase__ ):
# max_length=None => use the model max length (it's actually the default)
A_ = tokenizer(
examples["""sentence1"""], examples["""sentence2"""], truncation=UpperCAmelCase__, max_length=UpperCAmelCase__, padding="""max_length""" )
if "label" in examples:
A_ = [label_to_id[l] for l in examples["""label"""]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
A_ = datasets.map(
UpperCAmelCase__, batched=UpperCAmelCase__, remove_columns=["""sentence1""", """sentence2""", """label"""], )
def collate_fn(UpperCAmelCase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCAmelCase__, padding="""max_length""", max_length=1_28, return_tensors="""pt""" )
return tokenizer.pad(UpperCAmelCase__, padding="""longest""", return_tensors="""pt""" )
# Instantiate dataloaders.
A_ = DataLoader(tokenized_datasets["""train"""], shuffle=UpperCAmelCase__, collate_fn=UpperCAmelCase__, batch_size=2 )
A_ = DataLoader(tokenized_datasets["""validation"""], shuffle=UpperCAmelCase__, collate_fn=UpperCAmelCase__, batch_size=1 )
return train_dataloader, eval_dataloader
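# Illustrative sketch (coefficients a=2, b=3 as in the defaults above): the
# regression dataset draws x ~ N(0, 1) and y = a*x + b + noise, so a linear
# fit recovers (a, b).
_rng = np.random.default_rng(0)
_x = _rng.normal(size=(64,)).astype(np.float32)
_y = 2 * _x + 3 + _rng.normal(scale=0.1, size=(64,)).astype(np.float32)
print(np.polyfit(_x, _y, deg=1).round(1))  # ~[2. 3.]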
| 288 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''',
}
class A__ ( _snake_case , _snake_case ):
lowercase = "resnet"
lowercase = ["basic", "bottleneck"]
def __init__( self , UpperCamelCase__=3 , UpperCamelCase__=64 , UpperCamelCase__=[256, 512, 1024, 2048] , UpperCamelCase__=[3, 4, 6, 3] , UpperCamelCase__="bottleneck" , UpperCamelCase__="relu" , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ) -> List[str]:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
if layer_type not in self.layer_types:
raise ValueError(f'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' )
A_ = num_channels
A_ = embedding_size
A_ = hidden_sizes
A_ = depths
A_ = layer_type
A_ = hidden_act
A_ = downsample_in_first_stage
A_ = ["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(UpperCamelCase__ ) + 1 )]
A_ , A_ = get_aligned_output_features_output_indices(
out_features=UpperCamelCase__ , out_indices=UpperCamelCase__ , stage_names=self.stage_names )
class A__ ( _snake_case ):
lowercase = version.parse("1.11" )
@property
def snake_case_ ( self ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def snake_case_ ( self ) -> float:
'''simple docstring'''
return 1e-3
| 288 | 1 |
'''simple docstring'''
import qiskit
def __snake_case ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
lowerCamelCase_ = qiskit.Aer.get_backend("aer_simulator" )
# Create a Quantum Circuit acting on the q register
lowerCamelCase_ = qiskit.QuantumCircuit(UpperCAmelCase_ , UpperCAmelCase_ )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
lowerCamelCase_ = qiskit.execute(UpperCAmelCase_ , UpperCAmelCase_ , shots=1000 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(UpperCAmelCase_ )
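# Name-consistent sketch of the helper above (the mangled definition never
# binds the `single_qubit_measure` name used below; qiskit's legacy
# `Aer`/`execute` APIs are assumed to be available):
def single_qubit_measure(n_qubits: int, n_bits: int):
    simulator = qiskit.Aer.get_backend("aer_simulator")
    circuit = qiskit.QuantumCircuit(n_qubits, n_bits)
    circuit.measure([0], [0])
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)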
if __name__ == "__main__":
print(f'''Total count for various states are: {single_qubit_measure(1, 1)}''')
| 718 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a_ : Dict = logging.get_logger(__name__)
a_ : List[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
a_ : Optional[Any] = {
"""tokenizer_file""": {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json""",
},
}
a_ : str = {
"""gpt-neox-20b""": 2048,
}
class snake_case ( lowercase ):
"""simple docstring"""
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ["input_ids", "attention_mask"]
def __init__( self , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase=None , UpperCamelCase="<|endoftext|>" , UpperCamelCase="<|endoftext|>" , UpperCamelCase="<|endoftext|>" , UpperCamelCase=False , **UpperCamelCase , ):
"""simple docstring"""
super().__init__(
UpperCamelCase , UpperCamelCase , tokenizer_file=UpperCamelCase , unk_token=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , add_prefix_space=UpperCamelCase , **UpperCamelCase , )
lowerCamelCase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCamelCase ) != add_prefix_space:
lowerCamelCase_ = getattr(UpperCamelCase , pre_tok_state.pop("type" ) )
lowerCamelCase_ = add_prefix_space
lowerCamelCase_ = pre_tok_class(**UpperCamelCase )
lowerCamelCase_ = add_prefix_space
def snake_case ( self , UpperCamelCase , UpperCamelCase = None ):
"""simple docstring"""
lowerCamelCase_ = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase )
return tuple(UpperCamelCase )
def snake_case ( self , UpperCamelCase ):
"""simple docstring"""
lowerCamelCase_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) + [self.eos_token_id] )
if len(UpperCamelCase ) > self.model_max_length:
lowerCamelCase_ = input_ids[-self.model_max_length :]
return input_ids
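# Illustrative sketch (toy ids): the conversation builder above keeps only the
# most recent model_max_length ids once the accumulated history grows too long.
_model_max_length = 8
_input_ids = list(range(12))
if len(_input_ids) > _model_max_length:
    _input_ids = _input_ids[-_model_max_length:]
print(_input_ids)  # [4, 5, 6, 7, 8, 9, 10, 11]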
| 445 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ : Tuple = logging.get_logger(__name__)
A__ : Dict = {
"""RWKV/rwkv-4-169m-pile""": """https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-430m-pile""": """https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-1b5-pile""": """https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-3b-pile""": """https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-7b-pile""": """https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json""",
"""RWKV/rwkv-4-14b-pile""": """https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json""",
"""RWKV/rwkv-raven-1b5""": """https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json""",
"""RWKV/rwkv-raven-3b""": """https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json""",
"""RWKV/rwkv-raven-7b""": """https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json""",
"""RWKV/rwkv-raven-14b""": """https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json""",
}
class UpperCAmelCase_ (_UpperCAmelCase ):
"""simple docstring"""
lowerCamelCase : List[Any] = 'rwkv'
lowerCamelCase : Any = {'max_position_embeddings': 'context_length'}
def __init__( self , SCREAMING_SNAKE_CASE_=5_02_77 , SCREAMING_SNAKE_CASE_=10_24 , SCREAMING_SNAKE_CASE_=40_96 , SCREAMING_SNAKE_CASE_=32 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=1E-5 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=0 , SCREAMING_SNAKE_CASE_=6 , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=True , **SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]:
__lowerCamelCase : Optional[int] = vocab_size
__lowerCamelCase : Tuple = context_length
__lowerCamelCase : str = hidden_size
__lowerCamelCase : List[str] = num_hidden_layers
__lowerCamelCase : Any = attention_hidden_size if attention_hidden_size is not None else hidden_size
__lowerCamelCase : Optional[int] = intermediate_size if intermediate_size is not None else 4 * hidden_size
__lowerCamelCase : Optional[Any] = layer_norm_epsilon
__lowerCamelCase : int = rescale_every
__lowerCamelCase : Tuple = use_cache
__lowerCamelCase : int = bos_token_id
__lowerCamelCase : Optional[Any] = eos_token_id
super().__init__(
tie_word_embeddings=SCREAMING_SNAKE_CASE_ , bos_token_id=SCREAMING_SNAKE_CASE_ , eos_token_id=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
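# Illustrative note as code (numbers mirror the defaults above): when left
# unset, attention_hidden_size defaults to hidden_size and intermediate_size
# to 4 * hidden_size.
_hidden_size = 1024
_attention_hidden_size = _hidden_size   # default when None is passed
_intermediate_size = 4 * _hidden_size   # default when None is passed
print(_attention_hidden_size, _intermediate_size)  # 1024 4096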
| 13 |
"""simple docstring"""
import os
def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]:
    '''simple docstring'''
    with open(os.path.dirname(__file__) + "/p022_names.txt" ) as file:
        names = str(file.readlines()[0] )
        names = names.replace("\"" , "" ).split("," )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
    print(SCREAMING_SNAKE_CASE__())
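# Worked example of the scoring rule above: "COLIN" is worth
# 3 + 15 + 12 + 9 + 14 = 53, so at sorted position 938 it would contribute
# 938 * 53 = 49714 to the total.
print(sum(ord(ch) - 64 for ch in "COLIN"))  # 53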
| 438 | 0 |
'''simple docstring'''
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def snake_case_ ( SCREAMING_SNAKE_CASE__ ):
'''simple docstring'''
return EnvironmentCommand()
class __SCREAMING_SNAKE_CASE ( UpperCamelCase_ ):
'''simple docstring'''
@staticmethod
def UpperCamelCase( lowerCamelCase ):
_snake_case = parser.add_parser("env" )
download_parser.set_defaults(func=lowerCamelCase )
def UpperCamelCase( self ):
_snake_case = huggingface_hub.__version__
_snake_case = '''not installed'''
_snake_case = '''NA'''
if is_torch_available():
import torch
_snake_case = torch.__version__
_snake_case = torch.cuda.is_available()
_snake_case = '''not installed'''
if is_transformers_available():
import transformers
_snake_case = transformers.__version__
_snake_case = '''not installed'''
if is_accelerate_available():
import accelerate
_snake_case = accelerate.__version__
_snake_case = '''not installed'''
if is_xformers_available():
import xformers
_snake_case = xformers.__version__
_snake_case = {
'''`diffusers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''PyTorch version (GPU?)''': F'''{pt_version} ({pt_cuda_available})''',
'''Huggingface_hub version''': hub_version,
'''Transformers version''': transformers_version,
'''Accelerate version''': accelerate_version,
'''xFormers version''': xformers_version,
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
print(self.format_dict(lowerCamelCase ) )
return info
@staticmethod
def UpperCamelCase( lowerCamelCase ):
return "\n".join([F'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
| 719 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ : Dict = logging.get_logger(__name__)
__magic_name__ : Dict = {
"""google/mobilenet_v1_1.0_224""": """https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json""",
"""google/mobilenet_v1_0.75_192""": """https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json""",
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class __SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = '''mobilenet_v1'''
def __init__( self , lowerCamelCase=3 , lowerCamelCase=224 , lowerCamelCase=1.0 , lowerCamelCase=8 , lowerCamelCase="relu6" , lowerCamelCase=True , lowerCamelCase=0.999 , lowerCamelCase=0.02 , lowerCamelCase=0.001 , **lowerCamelCase , ):
super().__init__(**lowerCamelCase )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
_snake_case = num_channels
_snake_case = image_size
_snake_case = depth_multiplier
_snake_case = min_depth
_snake_case = hidden_act
_snake_case = tf_padding
_snake_case = classifier_dropout_prob
_snake_case = initializer_range
_snake_case = layer_norm_eps
class __SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
'''simple docstring'''
UpperCAmelCase__ : int = version.parse('''1.11''' )
@property
def UpperCamelCase( self ):
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def UpperCamelCase( self ):
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def UpperCamelCase( self ):
return 1e-4
| 368 | 0 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
a = logging.getLogger(__name__)
@dataclass
class lowercase_ :
'''simple docstring'''
UpperCAmelCase : str = field(
metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} )
UpperCAmelCase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
UpperCAmelCase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Pretrained tokenizer name or path if not the same as model_name'''} )
UpperCAmelCase : Optional[str] = field(
default=__lowerCAmelCase , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} , )
UpperCAmelCase : bool = field(default=__lowerCAmelCase , metadata={'''help''': '''Whether tp freeze the encoder.'''} )
UpperCAmelCase : bool = field(default=__lowerCAmelCase , metadata={'''help''': '''Whether to freeze the embeddings.'''} )
@dataclass
class lowercase_ :
'''simple docstring'''
UpperCAmelCase : str = field(
metadata={'''help''': '''The input data dir. Should contain the .tsv files (or other data files) for the task.'''} )
UpperCAmelCase : Optional[str] = field(
default='''summarization''' , metadata={'''help''': '''Task name, summarization (or summarization_{dataset} for pegasus) or translation'''} , )
UpperCAmelCase : Optional[int] = field(
default=1024 , metadata={
'''help''': (
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCAmelCase : Optional[int] = field(
default=128 , metadata={
'''help''': (
'''The maximum total sequence length for target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCAmelCase : Optional[int] = field(
default=142 , metadata={
'''help''': (
'''The maximum total sequence length for validation target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded. '''
'''This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '''
'''during ``evaluate`` and ``predict``.'''
)
} , )
UpperCAmelCase : Optional[int] = field(
default=142 , metadata={
'''help''': (
'''The maximum total sequence length for test target text after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
)
} , )
UpperCAmelCase : Optional[int] = field(default=-1 , metadata={'''help''': '''# training examples. -1 means use all.'''} )
UpperCAmelCase : Optional[int] = field(default=-1 , metadata={'''help''': '''# validation examples. -1 means use all.'''} )
UpperCAmelCase : Optional[int] = field(default=-1 , metadata={'''help''': '''# test examples. -1 means use all.'''} )
UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''Source language id for translation.'''} )
UpperCAmelCase : Optional[str] = field(default=__lowerCAmelCase , metadata={'''help''': '''Target language id for translation.'''} )
UpperCAmelCase : Optional[int] = field(default=__lowerCAmelCase , metadata={'''help''': '''# num_beams to use for evaluation.'''} )
UpperCAmelCase : bool = field(
default=__lowerCAmelCase , metadata={'''help''': '''If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'''} , )
def _snake_case ( _snake_case : Optional[Any] , _snake_case : int , _snake_case : Optional[Any] ) -> str:
'''simple docstring'''
logger.info(F'''***** {split} metrics *****''' )
for key in sorted(metrics.keys() ):
logger.info(F''' {key} = {metrics[key]}''' )
save_json(_snake_case , os.path.join(_snake_case , F'''{split}_results.json''' ) )
def _snake_case ( ) -> Tuple:
'''simple docstring'''
_A = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_A , _A , _A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_A , _A , _A = parser.parse_args_into_dataclasses()
check_output_dir(_snake_case )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info('Training/evaluation parameters %s' , _snake_case )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_A = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_A = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(_snake_case , _snake_case , _snake_case ):
assert hasattr(_snake_case , _snake_case ), F'''({config.__class__.__name__}) doesn\'t have a `{p}` attribute'''
setattr(_snake_case , _snake_case , getattr(_snake_case , _snake_case ) )
_A = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_A = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf='.ckpt' in model_args.model_name_or_path , config=_snake_case , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(_snake_case , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_A = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_snake_case , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_snake_case , _snake_case ):
_A = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_A = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(_snake_case )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_A = SeqaSeqDataset
# Get datasets
_A = (
dataset_class(
_snake_case , type_path='train' , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_train
else None
)
_A = (
dataset_class(
_snake_case , type_path='val' , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_A = (
dataset_class(
_snake_case , type_path='test' , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or '' , )
if training_args.do_predict
else None
)
# Initialize our Trainer
_A = (
build_compute_metrics_fn(data_args.task , _snake_case ) if training_args.predict_with_generate else None
)
_A = SeqaSeqTrainer(
model=_snake_case , args=_snake_case , data_args=_snake_case , train_dataset=_snake_case , eval_dataset=_snake_case , data_collator=SeqaSeqDataCollator(
_snake_case , _snake_case , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_snake_case , tokenizer=_snake_case , )
_A = {}
# Training
if training_args.do_train:
logger.info('*** Train ***' )
_A = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_A = train_result.metrics
_A = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics('train' , _snake_case , training_args.output_dir )
all_metrics.update(_snake_case )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , 'trainer_state.json' ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info('*** Evaluate ***' )
_A = trainer.evaluate(metric_key_prefix='val' )
_A = data_args.n_val
_A = round(metrics['val_loss'] , 4 )
if trainer.is_world_process_zero():
handle_metrics('val' , _snake_case , training_args.output_dir )
all_metrics.update(_snake_case )
if training_args.do_predict:
logger.info('*** Predict ***' )
_A = trainer.predict(test_dataset=_snake_case , metric_key_prefix='test' )
_A = test_output.metrics
_A = data_args.n_test
if trainer.is_world_process_zero():
_A = round(metrics['test_loss'] , 4 )
handle_metrics('test' , _snake_case , training_args.output_dir )
all_metrics.update(_snake_case )
if training_args.predict_with_generate:
_A = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_snake_case , clean_up_tokenization_spaces=_snake_case )
_A = lmap(str.strip , _snake_case )
write_txt_file(_snake_case , os.path.join(training_args.output_dir , 'test_generations.txt' ) )
if trainer.is_world_process_zero():
save_json(_snake_case , os.path.join(training_args.output_dir , 'all_results.json' ) )
return all_metrics
def _snake_case ( _snake_case : List[Any] ) -> int:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
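# Sketch of the freezing helpers imported above (assumed to match the example
# utils): freeze_params flips requires_grad off for every parameter, and
# freeze_embeds applies the same to the embedding tables.
def _freeze_params_sketch(model):
    for par in model.parameters():
        par.requires_grad = False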
| 7 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemager_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 412 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE = {
'configuration_blip_2': [
'BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Blip2Config',
'Blip2QFormerConfig',
'Blip2VisionConfig',
],
'processing_blip_2': ['Blip2Processor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
'BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST',
'Blip2Model',
'Blip2QFormerModel',
'Blip2PreTrainedModel',
'Blip2ForConditionalGeneration',
'Blip2VisionModel',
]
if TYPE_CHECKING:
from .configuration_blip_a import (
BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlipaConfig,
BlipaQFormerConfig,
BlipaVisionConfig,
)
from .processing_blip_a import BlipaProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip_a import (
BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipaForConditionalGeneration,
BlipaModel,
BlipaPreTrainedModel,
BlipaQFormerModel,
BlipaVisionModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
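# Minimal illustration of the lazy-module pattern above (not the real
# _LazyModule): attribute access triggers the import of the submodule that
# defines the attribute, keeping the top-level import cheap.
import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {a: m for m, attrs in import_structure.items() for a in attrs}
    def __getattr__(self, attr):
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)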
| 8 |
'''simple docstring'''
import importlib.metadata
from typing import Union
from packaging.version import Version, parse
from .constants import STR_OPERATION_TO_FUNC
SCREAMING_SNAKE_CASE = parse(importlib.metadata.version('torch'))
def lowercase_ ( __A : Union[str, Version] , __A : str , __A : str ) -> Union[str, Any]:
"""simple docstring"""
if operation not in STR_OPERATION_TO_FUNC.keys():
raise ValueError(F'`operation` must be one of {list(STR_OPERATION_TO_FUNC.keys() )}, received {operation}' )
lowercase : Any =STR_OPERATION_TO_FUNC[operation]
if isinstance(__A , __A ):
lowercase : List[Any] =parse(importlib.metadata.version(__A ) )
return operation(__A , parse(__A ) )
def lowercase_ ( __A : str , __A : str ) -> Tuple:
"""simple docstring"""
return compare_versions(__A , __A , __A )
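# Usage sketch: compare_versions above resolves the installed package version
# and applies the requested operator, e.g. ("torch", ">=", "1.12"). The
# underlying comparison is packaging's Version ordering:
print(parse("2.1.0") >= parse("1.12"))  # True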
| 8 | 1 |
from __future__ import annotations
def lowercase__ ( A_: list[int] ) -> bool:
"""simple docstring"""
return len(set(A_ ) ) == len(A_ )
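# Quick check: duplicates collapse in the set, so the lengths differ.
print(lowercase__([1, 2, 3]), lowercase__([1, 2, 2]))  # True False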
if __name__ == "__main__":
import doctest
doctest.testmod()
| 68 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def a__ ( A__ ):
SCREAMING_SNAKE_CASE_ : Any = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(A__, A__ )
def a__ ( A__ ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = emb.weight.shape
SCREAMING_SNAKE_CASE_ : List[Any] = nn.Linear(A__, A__, bias=A__ )
SCREAMING_SNAKE_CASE_ : Dict = emb.weight.data
return lin_layer
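# Illustrative sketch (toy sizes) of the helper above: a bias-free Linear whose
# weight is the embedding matrix, i.e. a tied LM head.
_emb = nn.Embedding(10, 4)
_lm_head = nn.Linear(4, 10, bias=False)
_lm_head.weight.data = _emb.weight.data
print(_lm_head.weight.shape)  # torch.Size([10, 4])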
def a__ ( A__ ):
SCREAMING_SNAKE_CASE_ : List[Any] = torch.load(A__, map_location='cpu' )
SCREAMING_SNAKE_CASE_ : Tuple = mam_aaa['args'] or mam_aaa['cfg']['model']
SCREAMING_SNAKE_CASE_ : Any = mam_aaa['model']
remove_ignore_keys_(A__ )
SCREAMING_SNAKE_CASE_ : List[str] = state_dict['encoder.embed_tokens.weight'].shape[0]
SCREAMING_SNAKE_CASE_ : Dict = MaMaaaConfig(
vocab_size=A__, max_position_embeddings=1_0_2_4, encoder_layers=args.encoder_layers, decoder_layers=args.decoder_layers, encoder_attention_heads=args.encoder_attention_heads, decoder_attention_heads=args.decoder_attention_heads, encoder_ffn_dim=args.encoder_ffn_embed_dim, decoder_ffn_dim=args.decoder_ffn_embed_dim, d_model=args.encoder_embed_dim, encoder_layerdrop=args.encoder_layerdrop, decoder_layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function='relu', )
SCREAMING_SNAKE_CASE_ : int = state_dict['decoder.embed_tokens.weight']
SCREAMING_SNAKE_CASE_ : Union[str, Any] = MaMaaaForConditionalGeneration(A__ )
model.model.load_state_dict(A__, strict=A__ )
SCREAMING_SNAKE_CASE_ : int = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCAmelCase__ : Optional[int] =argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
lowerCAmelCase__ : Optional[Any] =parser.parse_args()
    lowerCAmelCase__ : int =convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 101 | 0 |
"""simple docstring"""
import os
import sys
import unittest
__A = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
__A = os.path.join(git_repo_path, '''src''', '''diffusers''')
class _snake_case ( unittest.TestCase ):
def lowerCamelCase__ ( self : Optional[Any] ):
__lowerCamelCase : Optional[int] = find_backend(" if not is_torch_available():" )
self.assertEqual(UpperCAmelCase , "torch" )
# backend_with_underscore = find_backend(" if not is_tensorflow_text_available():")
# self.assertEqual(backend_with_underscore, "tensorflow_text")
__lowerCamelCase : str = find_backend(" if not (is_torch_available() and is_transformers_available()):" )
self.assertEqual(UpperCAmelCase , "torch_and_transformers" )
# double_backend_with_underscore = find_backend(
# " if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
# )
# self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")
__lowerCamelCase : Tuple = find_backend(
" if not (is_torch_available() and is_transformers_available() and is_onnx_available()):" )
self.assertEqual(UpperCAmelCase , "torch_and_transformers_and_onnx" )
def lowerCamelCase__ ( self : Dict ):
__lowerCamelCase : Optional[Any] = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn("torch" , UpperCAmelCase )
self.assertIn("torch_and_transformers" , UpperCAmelCase )
self.assertIn("flax_and_transformers" , UpperCAmelCase )
self.assertIn("torch_and_transformers_and_onnx" , UpperCAmelCase )
# Likewise, we can't assert on the exact content of a key
self.assertIn("UNet2DModel" , objects["torch"] )
self.assertIn("FlaxUNet2DConditionModel" , objects["flax"] )
self.assertIn("StableDiffusionPipeline" , objects["torch_and_transformers"] )
self.assertIn("FlaxStableDiffusionPipeline" , objects["flax_and_transformers"] )
self.assertIn("LMSDiscreteScheduler" , objects["torch_and_scipy"] )
self.assertIn("OnnxStableDiffusionPipeline" , objects["torch_and_transformers_and_onnx"] )
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase : Union[str, Any] = create_dummy_object("CONSTANT" , "'torch'" )
self.assertEqual(UpperCAmelCase , "\nCONSTANT = None\n" )
__lowerCamelCase : Optional[Any] = create_dummy_object("function" , "'torch'" )
self.assertEqual(
UpperCAmelCase , "\ndef function(*args, **kwargs):\n requires_backends(function, 'torch')\n" )
__lowerCamelCase : List[str] = "\nclass FakeClass(metaclass=DummyObject):\n _backends = 'torch'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, 'torch')\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, 'torch')\n"
__lowerCamelCase : Optional[int] = create_dummy_object("FakeClass" , "'torch'" )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
def lowerCamelCase__ ( self : List[Any] ):
__lowerCamelCase : str = "# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, [\"torch\"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = [\"torch\"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, [\"torch\"])\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, [\"torch\"])\n"
__lowerCamelCase : Union[str, Any] = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]} )
        self.assertEqual(dummy_files["torch"] , UpperCAmelCase )
 | 366 |
"""simple docstring"""
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
SENTENCE_DELIMITER = ""
if version.parse(importlib_metadata.version('''jiwer''')) < version.parse('''2.3.0'''):
    class SentencesToListOfCharacters(tr.AbstractTransform):
        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars
    cer_transform = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
    cer_transform = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER\'s output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
'''
_KWARGS_DESCRIPTION = '''
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
predictions: list of transcribtions to score.
concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> cer = datasets.load_metric("cer")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class CER(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/jitsi/jiwer/"] , reference_urls=[
"https://en.wikipedia.org/wiki/Word_error_rate",
"https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
] , )
    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
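
# Illustrative sketch (not part of the metric module): the accumulation in
# `_compute` above implements CER = (S + D + I) / N from the docstring. The
# helper below is hypothetical and only makes the arithmetic concrete.
def _cer_by_hand(substitutions: int, deletions: int, insertions: int, ref_chars: int) -> float:
    # N = S + D + C, the number of characters in the reference
    return (substitutions + deletions + insertions) / ref_chars


# prediction "helo" vs reference "hello": S=0, D=1, I=0, N=5 -> CER = 0.2
assert abs(_cer_by_hand(0, 1, 0, 5) - 0.2) < 1e-9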
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__(self, cache_dir: Optional[str] = None):
        self.extract_dir = (
            os.path.join(cache_dir, config.EXTRACTED_DATASETS_DIR) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor
    def _get_output_path(self, path: str) -> str:
        from .file_utils import hash_url_to_filename

        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path)
        return os.path.join(self.extract_dir, hash_url_to_filename(abs_path))
    def _do_extract(self, output_path: str, force_extract: bool) -> bool:
        return force_extract or (
            not os.path.isfile(output_path) and not (os.path.isdir(output_path) and os.listdir(output_path))
        )
    def extract(self, input_path: str, force_extract: bool = False) -> str:
        extractor_format = self.extractor.infer_extractor_format(input_path)
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path)
        if self._do_extract(output_path, force_extract):
            self.extractor.extract(input_path, output_path, extractor_format)
        return output_path
class BaseExtractor(ABC):
    @classmethod
    @abstractmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        ...

    @staticmethod
    @abstractmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        ...
class MagicNumberBaseExtractor(BaseExtractor, ABC):
    magic_numbers: List[bytes] = []

    @staticmethod
    def read_magic_number(path: Union[Path, str], magic_number_length: int):
        with open(path, "rb") as f:
            return f.read(magic_number_length)

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if not magic_number:
            magic_number_length = max(len(cls_magic_number) for cls_magic_number in cls.magic_numbers)
            try:
                magic_number = cls.read_magic_number(path, magic_number_length)
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number) for cls_magic_number in cls.magic_numbers)
class TarExtractor(BaseExtractor):
    @classmethod
    def is_extractable(cls, path: Union[Path, str], **kwargs) -> bool:
        return tarfile.is_tarfile(path)

    @staticmethod
    def safemembers(members, output_path):
        def resolved(path: str) -> str:
            return os.path.realpath(os.path.abspath(path))

        def badpath(path: str, base: str) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base, path)).startswith(base)

        def badlink(info, base: str) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base, os.path.dirname(info.name)))
            return badpath(info.linkname, base=tip)

        base = resolved(output_path)

        for finfo in members:
            if badpath(finfo.name, base):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)")
            elif finfo.issym() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}")
            elif finfo.islnk() and badlink(finfo, base):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}")
            else:
                yield finfo

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        tar_file = tarfile.open(input_path)
        tar_file.extractall(output_path, members=TarExtractor.safemembers(tar_file, output_path))
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x1F\x8B"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with gzip.open(input_path, "rb") as gzip_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(gzip_file, extracted_file)
class ZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]

    @classmethod
    def is_extractable(cls, path: Union[Path, str], magic_number: bytes = b"") -> bool:
        if super().is_extractable(path, magic_number=magic_number):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )

            with open(path, "rb") as fp:
                endrec = _EndRecData(fp)
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET])  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir)  # CD is where we expect it to be
                            if len(data) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir, data)  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        os.makedirs(output_path, exist_ok=True)
        with zipfile.ZipFile(input_path, "r") as zip_file:
            zip_file.extractall(output_path)
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with lzma.open(input_path) as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class RarExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID  # RAR5_ID

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.RARFILE_AVAILABLE:
            raise ImportError("Please pip install rarfile")
        import rarfile

        os.makedirs(output_path, exist_ok=True)
        rf = rarfile.RarFile(input_path)
        rf.extractall(output_path)
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("Please pip install zstandard")
        import zstandard as zstd

        dctx = zstd.ZstdDecompressor()
        with open(input_path, "rb") as ifh, open(output_path, "wb") as ofh:
            dctx.copy_stream(ifh, ofh)
class Bzip2Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x42\x5A\x68"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        with bz2.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class SevenZipExtractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("Please pip install py7zr")
        import py7zr

        os.makedirs(output_path, exist_ok=True)
        with py7zr.SevenZipFile(input_path, "r") as archive:
            archive.extractall(output_path)
class Lz4Extractor(MagicNumberBaseExtractor):
    magic_numbers = [b"\x04\x22\x4D\x18"]

    @staticmethod
    def extract(input_path: Union[Path, str], output_path: Union[Path, str]) -> None:
        if not config.LZ4_AVAILABLE:
            raise ImportError("Please pip install lz4")
        import lz4.frame

        with lz4.frame.open(input_path, "rb") as compressed_file:
            with open(output_path, "wb") as extracted_file:
                shutil.copyfileobj(compressed_file, extracted_file)
class Extractor:
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }
    @classmethod
    def _get_magic_number_max_length(cls):
        return max(
            len(extractor_magic_number)
            for extractor in cls.extractors.values()
            if issubclass(extractor, MagicNumberBaseExtractor)
            for extractor_magic_number in extractor.magic_numbers
        )
    @staticmethod
    def _read_magic_number(path: Union[Path, str], magic_number_length: int):
        try:
            return MagicNumberBaseExtractor.read_magic_number(path, magic_number_length=magic_number_length)
        except OSError:
            return b""
    @classmethod
    def is_extractable(cls, path: Union[Path, str], return_extractor: bool = False) -> bool:
        warnings.warn(
            "Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
            "Use 'infer_extractor_format' instead.",
            category=FutureWarning,
        )
        extractor_format = cls.infer_extractor_format(path)
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)
    @classmethod
    def infer_extractor_format(cls, path: Union[Path, str]) -> str:  # <Added version="2.4.0"/>
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path, magic_number_max_length)
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path, magic_number=magic_number):
                return extractor_format
    @classmethod
    def extract(
        cls,
        input_path: Union[Path, str],
        output_path: Union[Path, str],
        extractor_format: Optional[str] = None,
        extractor: Optional[BaseExtractor] = "deprecated",
    ) -> None:
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        # Prevent parallel extractions
        lock_path = str(Path(output_path).with_suffix(".lock"))
        with FileLock(lock_path):
            shutil.rmtree(output_path, ignore_errors=True)
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format, str):  # passed as positional arg
                    warnings.warn(
                        "Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. "
                        "Use 'extractor_format' instead.",
                        category=FutureWarning,
                    )
                    extractor = extractor if extractor != "deprecated" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path, output_path)
            else:
                warnings.warn(
                    "Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an "
                    "exception in 3.0.0.",
                    category=FutureWarning,
                )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path):
                        return extractor.extract(input_path, output_path)
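
# Minimal usage sketch of the extraction API above. The archive and cache paths
# are illustrative, and this assumes the module lives at datasets.utils.extract.
if __name__ == "__main__":
    archive = "/tmp/archive.tar.gz"  # hypothetical input file
    fmt = Extractor.infer_extractor_format(archive)  # e.g. "gzip"
    if fmt:
        Extractor.extract(archive, "/tmp/extracted_output", extractor_format=fmt)
    # Or, with caching of extracted outputs:
    # extracted_path = ExtractManager(cache_dir="/tmp/hf_cache").extract(archive)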
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}
class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_sparse_encoder_layers = num_sparse_encoder_layers
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype
        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs
        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
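
# Minimal usage sketch for the config above (the values are the documented
# defaults, shown only to make the sparse-step computation concrete):
if __name__ == "__main__":
    config = SwitchTransformersConfig(num_layers=12, num_sparse_encoder_layers=3)
    # with 12 encoder layers and 3 sparse ones, every 4th layer is sparse
    assert config.encoder_sparse_step == 4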
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_autoformer": [
"AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"AutoformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_autoformer"] = [
"AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"AutoformerForPrediction",
"AutoformerModel",
"AutoformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)

import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionTextToImagePipeline
from diffusers.utils.testing_utils import nightly, require_torch_gpu, torch_device
A : List[str] = False
class VersatileDiffusionTextToImagePipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionTextToImagePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_remove_unused_weights_save_load(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained("shi-labs/versatile-diffusion")
        # remove text_unet
        pipe.remove_unused_weights()
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(tmpdirname)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_text2img(self):
        pipe = VersatileDiffusionTextToImagePipeline.from_pretrained(
            "shi-labs/versatile-diffusion", torch_dtype=torch.float16
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images

        image_slice = image[0, 253:256, 253:256, -1]

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
'''simple docstring'''
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}
def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive integer to its representation in the given base (2-36)."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")

    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])
    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(10_00):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
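
# Worked examples for the conversion above (illustrative):
# 255 = 15 * 16 + 15, and both digits 15 map to "F", so base 16 gives "FF".
assert decimal_to_any(255, 16) == "FF"
assert decimal_to_any(255, 2) == "11111111"
assert int(decimal_to_any(1000, 36), 36) == 1000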
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/xmod-base''': '''https://huggingface.co/facebook/xmod-base/resolve/main/config.json''',
'''facebook/xmod-large-prenorm''': '''https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json''',
'''facebook/xmod-base-13-125k''': '''https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-125k''': '''https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json''',
'''facebook/xmod-base-30-195k''': '''https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json''',
'''facebook/xmod-base-60-125k''': '''https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json''',
'''facebook/xmod-base-60-265k''': '''https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json''',
'''facebook/xmod-base-75-125k''': '''https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json''',
'''facebook/xmod-base-75-269k''': '''https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json''',
}
class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self, vocab_size=30_522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True,
        classifier_dropout=None, pre_norm=False, adapter_reduction_factor=2, adapter_layer_norm=False,
        adapter_reuse_layer_norm=True, ln_before_adapter=True, languages=("en_XX",), default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
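
# Minimal usage sketch (illustrative; `task` and the adapter settings are just
# example values, not recommendations):
if __name__ == "__main__":
    config = XmodConfig(adapter_reduction_factor=2, default_language="en_XX")
    onnx_config = XmodOnnxConfig(config, task="default")
    print(onnx_config.inputs)  # OrderedDict with input_ids / attention_mask axes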
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types,
            block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn,
            norm_num_groups=norm_num_groups, double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types,
            block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn,
            norm_num_groups=norm_num_groups, norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)
        if not return_dict:
            return (h,)
        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(
        self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        # also go through the quantization layer unless explicitly disabled
        if not force_not_quantize:
            quant, _, _ = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        h = self.encode(sample).latents
        dec = self.decode(h).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
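
# Minimal round-trip sketch for the model above (illustrative shapes; assumes the
# diffusers package and its Encoder/Decoder/VectorQuantizer blocks are available):
if __name__ == "__main__":
    model = VQModel(block_out_channels=(32,), norm_num_groups=32)
    x = torch.randn(1, 3, 32, 32)
    latents = model.encode(x).latents
    reconstruction = model.decode(latents).sample
    assert reconstruction.shape == x.shape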
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.encodec")
MAPPING_QUANTIZER = {
"""quantizer.vq.layers.*._codebook.inited""": """quantizer.layers.*.codebook.inited""",
"""quantizer.vq.layers.*._codebook.cluster_size""": """quantizer.layers.*.codebook.cluster_size""",
"""quantizer.vq.layers.*._codebook.embed""": """quantizer.layers.*.codebook.embed""",
"""quantizer.vq.layers.*._codebook.embed_avg""": """quantizer.layers.*.codebook.embed_avg""",
}
MAPPING_ENCODER = {
"""encoder.model.0.conv.conv""": """encoder.layers.0.conv""",
"""encoder.model.1.block.1.conv.conv""": """encoder.layers.1.block.1.conv""",
"""encoder.model.1.block.3.conv.conv""": """encoder.layers.1.block.3.conv""",
"""encoder.model.1.shortcut.conv.conv""": """encoder.layers.1.shortcut.conv""",
"""encoder.model.3.conv.conv""": """encoder.layers.3.conv""",
"""encoder.model.4.block.1.conv.conv""": """encoder.layers.4.block.1.conv""",
"""encoder.model.4.block.3.conv.conv""": """encoder.layers.4.block.3.conv""",
"""encoder.model.4.shortcut.conv.conv""": """encoder.layers.4.shortcut.conv""",
"""encoder.model.6.conv.conv""": """encoder.layers.6.conv""",
"""encoder.model.7.block.1.conv.conv""": """encoder.layers.7.block.1.conv""",
"""encoder.model.7.block.3.conv.conv""": """encoder.layers.7.block.3.conv""",
"""encoder.model.7.shortcut.conv.conv""": """encoder.layers.7.shortcut.conv""",
"""encoder.model.9.conv.conv""": """encoder.layers.9.conv""",
"""encoder.model.10.block.1.conv.conv""": """encoder.layers.10.block.1.conv""",
"""encoder.model.10.block.3.conv.conv""": """encoder.layers.10.block.3.conv""",
"""encoder.model.10.shortcut.conv.conv""": """encoder.layers.10.shortcut.conv""",
"""encoder.model.12.conv.conv""": """encoder.layers.12.conv""",
"""encoder.model.13.lstm""": """encoder.layers.13.lstm""",
"""encoder.model.15.conv.conv""": """encoder.layers.15.conv""",
}
MAPPING_ENCODER_48K = {
"""encoder.model.0.conv.norm""": """encoder.layers.0.norm""",
"""encoder.model.1.block.1.conv.norm""": """encoder.layers.1.block.1.norm""",
"""encoder.model.1.block.3.conv.norm""": """encoder.layers.1.block.3.norm""",
"""encoder.model.1.shortcut.conv.norm""": """encoder.layers.1.shortcut.norm""",
"""encoder.model.3.conv.norm""": """encoder.layers.3.norm""",
"""encoder.model.4.block.1.conv.norm""": """encoder.layers.4.block.1.norm""",
"""encoder.model.4.block.3.conv.norm""": """encoder.layers.4.block.3.norm""",
"""encoder.model.4.shortcut.conv.norm""": """encoder.layers.4.shortcut.norm""",
"""encoder.model.6.conv.norm""": """encoder.layers.6.norm""",
"""encoder.model.7.block.1.conv.norm""": """encoder.layers.7.block.1.norm""",
"""encoder.model.7.block.3.conv.norm""": """encoder.layers.7.block.3.norm""",
"""encoder.model.7.shortcut.conv.norm""": """encoder.layers.7.shortcut.norm""",
"""encoder.model.9.conv.norm""": """encoder.layers.9.norm""",
"""encoder.model.10.block.1.conv.norm""": """encoder.layers.10.block.1.norm""",
"""encoder.model.10.block.3.conv.norm""": """encoder.layers.10.block.3.norm""",
"""encoder.model.10.shortcut.conv.norm""": """encoder.layers.10.shortcut.norm""",
"""encoder.model.12.conv.norm""": """encoder.layers.12.norm""",
"""encoder.model.15.conv.norm""": """encoder.layers.15.norm""",
}
MAPPING_DECODER = {
"""decoder.model.0.conv.conv""": """decoder.layers.0.conv""",
"""decoder.model.1.lstm""": """decoder.layers.1.lstm""",
"""decoder.model.3.convtr.convtr""": """decoder.layers.3.conv""",
"""decoder.model.4.block.1.conv.conv""": """decoder.layers.4.block.1.conv""",
"""decoder.model.4.block.3.conv.conv""": """decoder.layers.4.block.3.conv""",
"""decoder.model.4.shortcut.conv.conv""": """decoder.layers.4.shortcut.conv""",
"""decoder.model.6.convtr.convtr""": """decoder.layers.6.conv""",
"""decoder.model.7.block.1.conv.conv""": """decoder.layers.7.block.1.conv""",
"""decoder.model.7.block.3.conv.conv""": """decoder.layers.7.block.3.conv""",
"""decoder.model.7.shortcut.conv.conv""": """decoder.layers.7.shortcut.conv""",
"""decoder.model.9.convtr.convtr""": """decoder.layers.9.conv""",
"""decoder.model.10.block.1.conv.conv""": """decoder.layers.10.block.1.conv""",
"""decoder.model.10.block.3.conv.conv""": """decoder.layers.10.block.3.conv""",
"""decoder.model.10.shortcut.conv.conv""": """decoder.layers.10.shortcut.conv""",
"""decoder.model.12.convtr.convtr""": """decoder.layers.12.conv""",
"""decoder.model.13.block.1.conv.conv""": """decoder.layers.13.block.1.conv""",
"""decoder.model.13.block.3.conv.conv""": """decoder.layers.13.block.3.conv""",
"""decoder.model.13.shortcut.conv.conv""": """decoder.layers.13.shortcut.conv""",
"""decoder.model.15.conv.conv""": """decoder.layers.15.conv""",
}
MAPPING_DECODER_48K = {
"""decoder.model.0.conv.norm""": """decoder.layers.0.norm""",
"""decoder.model.3.convtr.norm""": """decoder.layers.3.norm""",
"""decoder.model.4.block.1.conv.norm""": """decoder.layers.4.block.1.norm""",
"""decoder.model.4.block.3.conv.norm""": """decoder.layers.4.block.3.norm""",
"""decoder.model.4.shortcut.conv.norm""": """decoder.layers.4.shortcut.norm""",
"""decoder.model.6.convtr.norm""": """decoder.layers.6.norm""",
"""decoder.model.7.block.1.conv.norm""": """decoder.layers.7.block.1.norm""",
"""decoder.model.7.block.3.conv.norm""": """decoder.layers.7.block.3.norm""",
"""decoder.model.7.shortcut.conv.norm""": """decoder.layers.7.shortcut.norm""",
"""decoder.model.9.convtr.norm""": """decoder.layers.9.norm""",
"""decoder.model.10.block.1.conv.norm""": """decoder.layers.10.block.1.norm""",
"""decoder.model.10.block.3.conv.norm""": """decoder.layers.10.block.3.norm""",
"""decoder.model.10.shortcut.conv.norm""": """decoder.layers.10.shortcut.norm""",
"""decoder.model.12.convtr.norm""": """decoder.layers.12.norm""",
"""decoder.model.13.block.1.conv.norm""": """decoder.layers.13.block.1.norm""",
"""decoder.model.13.block.3.conv.norm""": """decoder.layers.13.block.3.norm""",
"""decoder.model.13.shortcut.conv.norm""": """decoder.layers.13.shortcut.norm""",
"""decoder.model.15.conv.norm""": """decoder.layers.15.norm""",
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.')
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
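
# Illustrative checks of the wildcard matching above (the keys are made up and
# are not real checkpoint entries):
assert should_ignore("encoder.layers.3.conv.bias", ["encoder.layers.*.conv"])
assert should_ignore("decoder.model.foo", ["decoder.*"])
assert not should_ignore("quantizer.embed", ["encoder.*"])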
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ["encodec_24khz", "encodec_32khz"]:
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(
    model_name,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model""",
default="""encodec_24khz""",
type=str,
help="""The model to convert. Should be one of 'encodec_24khz', 'encodec_32khz', 'encodec_48khz'.""",
)
parser.add_argument("""--checkpoint_path""", required=True, default=None, type=str, help="""Path to original checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, default=None, type=str, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
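
# Example invocation (illustrative paths; the script filename is assumed):
#   python convert_encodec_checkpoint_to_pytorch.py \
#       --model encodec_24khz \
#       --checkpoint_path ./encodec_24khz-d7cc33bc.th \
#       --pytorch_dump_folder_path ./encodec_24khz_hf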
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/xglm-564M": 2_0_4_8,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # fairseq | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm     | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        # Mimic fairseq token-to-id alignment for the first 4 tokens
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_ids = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
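
# Minimal usage sketch (illustrative; downloads the pretrained SentencePiece
# vocabulary from the Hub on first use):
if __name__ == "__main__":
    tokenizer = XGLMTokenizer.from_pretrained("facebook/xglm-564M")
    ids = tokenizer("Hello world").input_ids
    print(tokenizer.convert_ids_to_tokens(ids))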
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class OnnxRuntimeModel(metaclass=DummyObject):
    _backends = ["onnx"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["onnx"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["onnx"])
def jaro_winkler(str1: str, str2: str) -> float:
    """Jaro-Winkler similarity between two strings."""

    def get_matched_characters(_str1: str, _str2: str) -> str:
        matched = []
        limit = min(len(_str1), len(_str2)) // 2
        for i, l in enumerate(_str1):
            left = int(max(0, i - limit))
            right = int(min(i + limit + 1, len(_str2)))
            if l in _str2[left:right]:
                matched.append(l)
                _str2 = f"{_str2[0:_str2.index(l)]} {_str2[_str2.index(l) + 1:]}"
        return "".join(matched)

    # matching characters
    matching_1 = get_matched_characters(str1, str2)
    matching_2 = get_matched_characters(str2, str1)
    match_count = len(matching_1)

    # transposition
    transpositions = (
        len([(c1, c2) for c1, c2 in zip(matching_1, matching_2) if c1 != c2]) // 2
    )

    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(str1)
                + match_count / len(str2)
                + (match_count - transpositions) / match_count
            )
        )

    # common prefix up to 4 characters
    prefix_len = 0
    for c1, c2 in zip(str1[:4], str2[:4]):
        if c1 == c2:
            prefix_len += 1
        else:
            break

    return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
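
# Worked check (illustrative): "hello" vs "world" match only on "l" within the
# search window, so m=1 and t=0, giving jaro = (1/5 + 1/5 + 1/1) / 3 = 0.4667;
# with no common prefix the Winkler adjustment leaves the score unchanged.
assert abs(jaro_winkler("hello", "world") - 0.4666666666666666) < 1e-9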
"""simple docstring"""
import comet # From: unbabel-comet
import torch
import datasets
A_ : Dict = datasets.logging.get_logger(__name__)
_CITATION = "\\n@inproceedings{rei-EtAl:2020:WMT,\n author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},\n title = {Unbabel's Participation in the WMT20 Metrics Shared Task},\n booktitle = {Proceedings of the Fifth Conference on Machine Translation},\n month = {November},\n year = {2020},\n address = {Online},\n publisher = {Association for Computational Linguistics},\n pages = {909--918},\n}\n@inproceedings{rei-etal-2020-comet,\n title = \"{COMET}: A Neural Framework for {MT} Evaluation\",\n author = \"Rei, Ricardo and\n Stewart, Craig and\n Farinha, Ana C and\n Lavie, Alon\",\n booktitle = \"Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)\",\n month = nov,\n year = \"2020\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/2020.emnlp-main.213\",\n pages = \"2685--2702\",\n}\n"
_DESCRIPTION = "\\nCrosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA's or MQM).\nWith the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task achieving SOTA in that years competition.\n\nSee the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.\n"
_KWARGS_DESCRIPTION = "\nCOMET score.\n\nArgs:\n\n`sources` (list of str): Source sentences\n`predictions` (list of str): candidate translations\n`references` (list of str): reference translations\n`cuda` (bool): If set to True, runs COMET using GPU\n`show_progress` (bool): Shows progress\n`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.\n\nReturns:\n `samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.\n `scores`: List of scores.\n\nExamples:\n\n >>> comet_metric = datasets.load_metric('comet')\n >>> # comet_metric = load_metric('comet', 'wmt20-comet-da') # you can also choose which model to use\n >>> source = [\"Dem Feuer konnte Einhalt geboten werden\", \"Schulen und Kindergärten wurden eröffnet.\"]\n >>> hypothesis = [\"The fire could be stopped\", \"Schools and kindergartens were open\"]\n >>> reference = [\"They were able to control the fire.\", \"Schools and kindergartens opened\"]\n >>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [0.19, 0.92]\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class COMET(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://unbabel.github.io/COMET/html/index.html""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""sources""": datasets.Value("""string""" , id="""sequence""" ),
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/Unbabel/COMET"""] , reference_urls=[
"""https://github.com/Unbabel/COMET""",
"""https://www.aclweb.org/anthology/2020.emnlp-main.213/""",
"""http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6""",
] , )
    def _download_and_prepare(self, dl_manager):
        if self.config_name == "default":
            self.scorer = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da"))
        else:
            self.scorer = comet.load_from_checkpoint(comet.download_model(self.config_name))
    def _compute(self, sources, predictions, references, gpus=None, progress_bar=False):
        if gpus is None:
            gpus = 1 if torch.cuda.is_available() else 0
        data = {"src": sources, "mt": predictions, "ref": references}
        data = [dict(zip(data, t)) for t in zip(*data.values())]
        scores, mean_score = self.scorer.predict(data, gpus=gpus, progress_bar=progress_bar)
        return {"mean_score": mean_score, "scores": scores}
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class MobileViTImageProcessingTest (ImageProcessingSavingTestMixin ,unittest.TestCase ):
lowerCamelCase__ : Tuple = MobileViTImageProcessor if is_vision_available() else None
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
SCREAMING_SNAKE_CASE__ = MobileViTImageProcessingTester(self )
@property
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
return self.image_processor_tester.prepare_image_processor_dict()
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCAmelCase , """do_resize""" ) )
self.assertTrue(hasattr(__UpperCAmelCase , """size""" ) )
self.assertTrue(hasattr(__UpperCAmelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(__UpperCAmelCase , """center_crop""" ) )
self.assertTrue(hasattr(__UpperCAmelCase , """do_flip_channel_order""" ) )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 2_0} )
self.assertEqual(image_processor.crop_size , {"""height""": 1_8, """width""": 1_8} )
SCREAMING_SNAKE_CASE__ = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 4_2} )
self.assertEqual(image_processor.crop_size , {"""height""": 8_4, """width""": 8_4} )
def SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
pass
def SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__UpperCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , numpify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__UpperCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
# Initialize image_processing
SCREAMING_SNAKE_CASE__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCAmelCase , torchify=__UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCAmelCase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ = image_processing(__UpperCAmelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
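# A hedged, numpy-only sketch of the `do_flip_channel_order` step the processor
# under test applies: reversing the channel axis turns RGB into BGR. The tiny
# 2x2 image below is made up for illustration.
if __name__ == "__main__":
    rgb = np.arange(12, dtype=np.uint8).reshape(2, 2, 3)  # H x W x C
    bgr = rgb[..., ::-1]  # flip the last (channel) axis
    assert (bgr[..., 0] == rgb[..., 2]).all()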
| 196 | 1 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
}
lowercase__ = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
]
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
for attribute in key.split('.' ):
UpperCAmelCase : Tuple = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
if weight_type is not None:
UpperCAmelCase : List[str] = getattr(UpperCAmelCase_ , UpperCAmelCase_ ).shape
else:
UpperCAmelCase : Union[str, Any] = hf_pointer.shape
assert hf_shape == value.shape, (
F"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
F""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
UpperCAmelCase : Optional[Any] = value
elif weight_type == "weight_g":
UpperCAmelCase : Union[str, Any] = value
elif weight_type == "weight_v":
UpperCAmelCase : str = value
elif weight_type == "bias":
UpperCAmelCase : str = value
else:
UpperCAmelCase : str = value
logger.info(F"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" )
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : int = []
UpperCAmelCase : Optional[int] = fairseq_model.state_dict()
UpperCAmelCase : List[str] = hf_model.feature_extractor
# if encoder has different dim to decoder -> use proj_weight
UpperCAmelCase : Dict = None
for name, value in fairseq_dict.items():
UpperCAmelCase : List[Any] = False
if "conv_layers" in name:
load_conv_layer(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , hf_model.config.feat_extract_norm == 'group' , )
UpperCAmelCase : Any = True
elif name.split('.' )[0] == "proj":
UpperCAmelCase : List[Any] = fairseq_model.proj
UpperCAmelCase : Union[str, Any] = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
UpperCAmelCase : Optional[Any] = True
if "*" in mapped_key:
UpperCAmelCase : int = name.split(UpperCAmelCase_ )[0].split('.' )[-2]
UpperCAmelCase : Union[str, Any] = mapped_key.replace('*' , UpperCAmelCase_ )
if "weight_g" in name:
UpperCAmelCase : Optional[Any] = 'weight_g'
elif "weight_v" in name:
UpperCAmelCase : Any = 'weight_v'
elif "bias" in name:
UpperCAmelCase : Dict = 'bias'
elif "weight" in name:
UpperCAmelCase : Optional[int] = 'weight'
else:
UpperCAmelCase : Union[str, Any] = None
set_recursively(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
continue
if not is_used:
unused_weights.append(UpperCAmelCase_ )
logger.warning(F"""Unused weights: {unused_weights}""" )
return proj_weight
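# A hedged, self-contained sketch of the `*` wildcard substitution used with
# MAPPING above: the layer index parsed out of the fairseq key replaces the
# wildcard in the HF key. The helper and example keys are hypothetical.
def _expand_wildcard_sketch(fairseq_name, key, mapped_key):
    # e.g. key="self_attn.k_proj", mapped_key="encoder.layers.*.attention.k_proj",
    #      fairseq_name="encoder.layers.3.self_attn.k_proj.weight" -> layer "3"
    layer_index = fairseq_name.split(key)[0].split(".")[-2]
    return mapped_key.replace("*", layer_index)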
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
UpperCAmelCase : str = full_name.split('conv_layers.' )[-1]
UpperCAmelCase : Optional[Any] = name.split('.' )
UpperCAmelCase : List[str] = int(items[0] )
UpperCAmelCase : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
UpperCAmelCase : int = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
UpperCAmelCase : Optional[int] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
UpperCAmelCase : List[str] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
UpperCAmelCase : List[Any] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(UpperCAmelCase_ )
def UpperCamelCase( UpperCAmelCase_ ):
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = emb.weight.shape
UpperCAmelCase : List[str] = nn.Linear(UpperCAmelCase_ , UpperCAmelCase_ , bias=UpperCAmelCase_ )
UpperCAmelCase : Optional[int] = emb.weight.data
return lin_layer
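# A hedged sketch of what the embedding-to-linear conversion above achieves:
# the output head maps hidden states back onto the vocabulary with the same
# matrix as the input embedding. The sizes below are made up.
def _tied_lm_head_sketch():
    emb = nn.Embedding(10, 4)  # vocab_size=10, emb_size=4
    lm_head = nn.Linear(4, 10, bias=False)
    lm_head.weight.data = emb.weight.data  # share the embedding matrix
    return lm_head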
def UpperCamelCase( UpperCAmelCase_ ):
with open(UpperCAmelCase_ , 'r' , encoding='utf-8' ) as f:
UpperCAmelCase : Dict = f.readlines()
UpperCAmelCase : List[str] = [line.split(' ' )[0] for line in lines]
UpperCAmelCase : int = len(UpperCAmelCase_ )
UpperCAmelCase : int = {
'<s>': 0,
'<pad>': 1,
'</s>': 2,
'<unk>': 3,
}
vocab_dict.update(dict(zip(UpperCAmelCase_ , range(4 , num_words + 4 ) ) ) )
return vocab_dict
@torch.no_grad()
def UpperCamelCase( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , ):
UpperCAmelCase : List[str] = WavaVecaConfig.from_pretrained(UpperCAmelCase_ )
UpperCAmelCase : Optional[Any] = SpeechaTextaConfig.from_pretrained(
UpperCAmelCase_ , vocab_size=UpperCAmelCase_ , decoder_layers=UpperCAmelCase_ , do_stable_layer_norm=UpperCAmelCase_ )
UpperCAmelCase : Optional[Any] = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , )
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
UpperCAmelCase : Optional[Any] = model[0].eval()
# set weights for wav2vec2 encoder
UpperCAmelCase : Any = WavaVecaModel(UpperCAmelCase_ )
UpperCAmelCase : str = recursively_load_weights_wavaveca(model.encoder , UpperCAmelCase_ )
UpperCAmelCase : int = SpeechaTextaForCausalLM(UpperCAmelCase_ )
UpperCAmelCase , UpperCAmelCase : List[Any] = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCAmelCase_ )
# set output linear layer
unexpected_keys.remove('embed_out' )
UpperCAmelCase : str = nn.Parameter(model.decoder.embed_out.detach() )
# layer norm is init to identity matrix so leaving it is fine
logger.warning(F"""The following keys are missing when loading the decoder weights: {missing_keys}""" )
logger.warning(F"""The following keys are unexpected when loading the decoder weights: {unexpected_keys}""" )
UpperCAmelCase : List[str] = SpeechEncoderDecoderModel(encoder=UpperCAmelCase_ , decoder=UpperCAmelCase_ )
UpperCAmelCase : Any = False
# add projection layer
UpperCAmelCase : Optional[Any] = nn.Parameter(projection_layer.weight )
UpperCAmelCase : List[str] = nn.Parameter(projection_layer.bias )
UpperCAmelCase : Tuple = create_vocab_dict(UpperCAmelCase_ )
with open(os.path.join(UpperCAmelCase_ , 'vocab.json' ) , 'w' ) as fp:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
UpperCAmelCase : Optional[int] = SpeechaTextaTokenizer(os.path.join(UpperCAmelCase_ , 'vocab.json' ) )
tokenizer.save_pretrained(UpperCAmelCase_ )
UpperCAmelCase : str = hf_wavavec.config.to_dict()
UpperCAmelCase : int = tokenizer.pad_token_id
UpperCAmelCase : str = tokenizer.bos_token_id
UpperCAmelCase : Union[str, Any] = tokenizer.eos_token_id
UpperCAmelCase : Any = 'speech_to_text_2'
UpperCAmelCase : Any = 'wav2vec2'
UpperCAmelCase : str = SpeechEncoderDecoderConfig.from_dict(UpperCAmelCase_ )
hf_wavavec.save_pretrained(UpperCAmelCase_ )
feature_extractor.save_pretrained(UpperCAmelCase_ )
if __name__ == "__main__":
lowercase__ = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument(
"--encoder_config_path",
default="facebook/wav2vec2-large-lv60",
type=str,
help="Path to hf encoder wav2vec2 checkpoint config",
)
parser.add_argument(
"--decoder_config_path",
default="facebook/s2t-small-mustc-en-fr-st",
type=str,
help="Path to hf decoder s2t checkpoint config",
)
parser.add_argument("--vocab_size", default=10224, type=int, help="Vocab size of decoder")
parser.add_argument("--num_decoder_layers", default=7, type=int, help="Number of decoder layers")
lowercase__ = parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 695 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class A_ ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase_ ( self : int ) -> Union[str, Any]:
torch.manual_seed(0 )
UpperCAmelCase : Any = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=('DownBlock2D', 'AttnDownBlock2D') , up_block_types=('AttnUpBlock2D', 'UpBlock2D') , )
return model
def UpperCAmelCase_ ( self : str ) -> Optional[Any]:
UpperCAmelCase : Dict = self.dummy_uncond_unet
UpperCAmelCase : Dict = KarrasVeScheduler()
UpperCAmelCase : str = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' ).images
UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
UpperCAmelCase : Optional[Any] = pipe(num_inference_steps=2 , generator=lowercase_ , output_type='numpy' , return_dict=lowercase_ )[0]
UpperCAmelCase : Optional[int] = image[0, -3:, -3:, -1]
UpperCAmelCase : int = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase : Any = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class A_ ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase_ ( self : Optional[Any] ) -> Tuple:
UpperCAmelCase : Dict = 'google/ncsnpp-celebahq-256'
UpperCAmelCase : Any = UNetaDModel.from_pretrained(lowercase_ )
UpperCAmelCase : Union[str, Any] = KarrasVeScheduler()
UpperCAmelCase : Dict = KarrasVePipeline(unet=lowercase_ , scheduler=lowercase_ )
pipe.to(lowercase_ )
pipe.set_progress_bar_config(disable=lowercase_ )
UpperCAmelCase : Union[str, Any] = torch.manual_seed(0 )
UpperCAmelCase : Dict = pipe(num_inference_steps=20 , generator=lowercase_ , output_type='numpy' ).images
UpperCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCAmelCase : Optional[int] = np.array([0.578, 0.5811, 0.5924, 0.5809, 0.587, 0.5886, 0.5861, 0.5802, 0.586] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
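# A hedged, numpy-only sketch of the slice-comparison idiom used above: compare
# a small corner of the generated image against stored reference values within
# an absolute tolerance. The arrays here are hypothetical.
if __name__ == "__main__":
    image_slice = np.zeros(9)
    expected_slice = np.full(9, 5e-3)
    assert np.abs(image_slice - expected_slice).max() < 1e-2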
| 695 | 1 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be trained."} )
a = field(
default="./" , metadata={"help": "Save dir where model repo is cloned and models updates are saved to."} )
a = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path of training dataset."} )
a = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
a = field(default=2 , metadata={"help": "Batch size for training."} )
a = field(default=2 , metadata={"help": "Batch size for evaluation."} )
a = field(default=0.1 , metadata={"help": "Value of weight decay."} )
a = field(
default=1_00_00 , metadata={"help": "Size of buffer used to shuffle streaming dataset."} )
    a = field(default=2e-4 , metadata={"help": "Learning rate for training."} )
    a = field(default="cosine" , metadata={"help": "Learning rate scheduler type."} )
a = field(
default=7_50 , metadata={"help": "Number of warmup steps in the learning rate schedule."} )
a = field(
default=16 , metadata={"help": "Number of gradient accumulation steps."} )
a = field(
default=A__ , metadata={"help": "Use gradient checkpointing to reduce memory footprint."} )
a = field(default=5_00_00 , metadata={"help": "Maximum number of training steps."} )
a = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
a = field(default=10_24 , metadata={"help": "Sequence lengths used for training."} )
a = field(default=1 , metadata={"help": "Training seed."} )
a = field(
default=10_24 , metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."} , )
a = field(
default=A__ , metadata={"help": "States path if the training should continue from a checkpoint folder."} )
a = field(default=A__ , metadata={"help": "If True the data is pretokenized."} )
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
a = field(
default="codeparrot/codeparrot-clean-valid" , metadata={"help": "Name or path of validation dataset."} )
a = field(default=2 , metadata={"help": "Batch size used for evaluation."} )
a = field(
default=-1 , metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."} )
a = field(default=10_24 , metadata={"help": "Length of sequences to be evaluated."} )
a = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Model name or path of model to be evaluated."} )
a = field(default=A__ , metadata={"help": "Number of workers used for code evaluation."} )
a = field(
default=A__ , metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."} , )
a = field(
default=A__ , metadata={"help": "Sample from the language model's output distribution."} )
a = field(default=0.2 , metadata={"help": "Sampling temperature used for generation."} )
a = field(default=2_56 , metadata={"help": "Maximum number of newly generated tokens."} )
a = field(default=0 , metadata={"help": "Top-k parameter used for generation."} )
a = field(default=0.9_5 , metadata={"help": "Top-p parameter used for nucleus sampling."} )
a = field(default=10 , metadata={"help": "Number of generations to run in parallel."} )
a = field(
default=2_00 , metadata={"help": "Number of completions to generate for each sample."} )
a = field(default=1 , metadata={"help": "Random seed used for evaluation."} )
a = field(
default="eval_results.json" , metadata={"help": "Random seed used for evaluation."} )
a = field(
default="0" , metadata={"help": "Allow `code_eval` to execute Python code on machine"} )
a = field(
default=-1 , metadata={
"help": (
"Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
" number corresponds to which GPU device id to run on."
)
} , )
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
a = field(
default=A__ , metadata={
"help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
} , )
a = field(
default="transformersbook/codeparrot" , metadata={"help": "Folder or name of dataset to process."} )
a = field(
default="codeparrot-clean" , metadata={"help": "Folder to save processed processed dataset."} )
a = field(
default=10_00_00 , metadata={"help": "Number of files to save per JSON output file."} )
a = field(default="content" , metadata={"help": "Column containing text data to process."} )
a = field(
default=10_00 , metadata={"help": "Maximum line length in file, otherwise file is filtered."} )
a = field(
default=1_00 , metadata={"help": "Maximum mean line length in file, otherwise file is filtered."} )
a = field(
default=0.2_5 , metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."} )
a = field(
default=1.5 , metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."} )
a = field(
default=0.7 , metadata={"help": "Probability for filtering config, test and uncommon files."} )
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} , )
a = field(
default=A__ , metadata={"help": "If True, near-duplicate samples are removed."} )
a = field(
default=0.8_5 , metadata={"help": "Jaccard threshold for near-duplicate samples."} )
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
a = field(
default="gpt2" , metadata={"help": "Base tokenizer to build new tokenizer from."} )
a = field(
default="transformersbook/codeparrot-train" , metadata={"help": "Dataset to train tokenizer on."} )
a = field(default="content" , metadata={"help": "Column containing text data to process."} )
a = field(default=20_00_00 , metadata={"help": "Number of examples to train tokenizer on."} )
a = field(
        default=3_27_68 , metadata={"help": "Vocabulary size of the new tokenizer."} )
a = field(default="codeparrot" , metadata={"help": "Name of new tokenizer."} )
a = field(default=A__ , metadata={"help": "Push saved tokenizer to the hub."} )
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Name or path to the tokenizer."} )
a = field(
default="codeparrot/codeparrot-clean-train" , metadata={"help": "Name or path to the dataset to pretokenize."} )
a = field(
default="tokenized-codeparrot-train" , metadata={"help": "Repo name of the pretokenized data."} )
a = field(default=A__ , metadata={"help": "Number of workers used for code evaluation."} )
@dataclass
class UpperCAmelCase__ :
"""simple docstring"""
a = field(
default="gpt2-large" , metadata={"help": "Configuration to use for model initialization."} )
a = field(
default="codeparrot/codeparrot" , metadata={"help": "Tokenizer attached to model."} )
a = field(default="codeparrot" , metadata={"help": "Name of the created model."} )
a = field(default=A__ , metadata={"help": "Push saved tokenizer to the hub."} )
| 493 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate
@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput( BaseOutput ):
    """simple docstring"""
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
def betas_for_alpha_bar( num_diffusion_timesteps , max_beta=0.9_9_9 , alpha_transform_type="cosine" , ):
    '''simple docstring'''
    if alpha_transform_type == "cosine":
        def alpha_bar_fn(t ):
            return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
    elif alpha_transform_type == "exp":
        def alpha_bar_fn(t ):
            return math.exp(t * -1_2.0 )
    else:
        raise ValueError(F'''Unsupported alpha_transform_type: {alpha_transform_type}''' )
    betas = []
    for i in range(num_diffusion_timesteps ):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2 ) / alpha_bar_fn(t1 ) , max_beta ) )
    return torch.tensor(betas , dtype=torch.float32 )
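# A hedged numeric aside: for the "cosine" transform above, alpha_bar(t) is
# cos(((t + 0.008) / 1.008) * pi / 2) ** 2, so every beta returned must lie in
# (0, max_beta]. This check function is not part of the original file.
def _check_betas_sketch():
    betas = betas_for_alpha_bar(10)
    assert float(betas.min()) > 0.0 and float(betas.max()) <= 0.999
    return betas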
class DDIMInverseScheduler( SchedulerMixin , ConfigMixin ):
    """simple docstring"""
    order = 1
@register_to_config
def __init__( self : Dict , __lowerCamelCase : int = 1000 , __lowerCamelCase : float = 0.0001 , __lowerCamelCase : float = 0.02 , __lowerCamelCase : str = "linear" , __lowerCamelCase : Optional[Union[np.ndarray, List[float]]] = None , __lowerCamelCase : bool = True , __lowerCamelCase : bool = True , __lowerCamelCase : int = 0 , __lowerCamelCase : str = "epsilon" , __lowerCamelCase : float = 1.0 , **__lowerCamelCase : str , ) -> Optional[Any]:
if kwargs.get('''set_alpha_to_one''' , __lowerCamelCase ) is not None:
SCREAMING_SNAKE_CASE__ = (
'''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
)
deprecate('''set_alpha_to_one''' , '''1.0.0''' , __lowerCamelCase , standard_warn=__lowerCamelCase )
SCREAMING_SNAKE_CASE__ = kwargs['''set_alpha_to_one''']
if trained_betas is not None:
SCREAMING_SNAKE_CASE__ = torch.tensor(__lowerCamelCase , dtype=torch.floataa )
elif beta_schedule == "linear":
SCREAMING_SNAKE_CASE__ = torch.linspace(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
SCREAMING_SNAKE_CASE__ = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , __lowerCamelCase , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
SCREAMING_SNAKE_CASE__ = betas_for_alpha_bar(__lowerCamelCase )
else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )
SCREAMING_SNAKE_CASE__ = 1.0 - self.betas
SCREAMING_SNAKE_CASE__ = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod
# For the final step, there is no next alphas_cumprod, and the index is out of bounds
# `set_alpha_to_zero` decides whether we set this parameter simply to zero
# in this case, self.step() just output the predicted noise
# or whether we use the final alpha of the "non-previous" one.
SCREAMING_SNAKE_CASE__ = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution
SCREAMING_SNAKE_CASE__ = 1.0
# setable values
SCREAMING_SNAKE_CASE__ = None
SCREAMING_SNAKE_CASE__ = torch.from_numpy(np.arange(0 , __lowerCamelCase ).copy().astype(np.intaa ) )
def lowercase_ ( self : Optional[int] , __lowerCamelCase : torch.FloatTensor , __lowerCamelCase : Optional[int] = None ) -> torch.FloatTensor:
return sample
def lowercase_ ( self : int , __lowerCamelCase : int , __lowerCamelCase : Union[str, torch.device] = None ) -> Union[str, Any]:
if num_inference_steps > self.config.num_train_timesteps:
raise ValueError(
f'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
f''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
f''' maximal {self.config.num_train_timesteps} timesteps.''' )
SCREAMING_SNAKE_CASE__ = num_inference_steps
SCREAMING_SNAKE_CASE__ = self.config.num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
SCREAMING_SNAKE_CASE__ = (np.arange(0 , __lowerCamelCase ) * step_ratio).round().copy().astype(np.intaa )
SCREAMING_SNAKE_CASE__ = torch.from_numpy(__lowerCamelCase ).to(__lowerCamelCase )
self.timesteps += self.config.steps_offset
def lowercase_ ( self : List[Any] , __lowerCamelCase : torch.FloatTensor , __lowerCamelCase : int , __lowerCamelCase : torch.FloatTensor , __lowerCamelCase : float = 0.0 , __lowerCamelCase : bool = False , __lowerCamelCase : Optional[torch.FloatTensor] = None , __lowerCamelCase : bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
# 1. get previous step value (=t+1)
SCREAMING_SNAKE_CASE__ = timestep + self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
# change original implementation to exactly match noise levels for analogous forward process
SCREAMING_SNAKE_CASE__ = self.alphas_cumprod[timestep]
SCREAMING_SNAKE_CASE__ = (
self.alphas_cumprod[prev_timestep]
if prev_timestep < self.config.num_train_timesteps
else self.final_alpha_cumprod
)
SCREAMING_SNAKE_CASE__ = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
if self.config.prediction_type == "epsilon":
SCREAMING_SNAKE_CASE__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
SCREAMING_SNAKE_CASE__ = model_output
elif self.config.prediction_type == "sample":
SCREAMING_SNAKE_CASE__ = model_output
SCREAMING_SNAKE_CASE__ = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
elif self.config.prediction_type == "v_prediction":
SCREAMING_SNAKE_CASE__ = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
SCREAMING_SNAKE_CASE__ = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
else:
raise ValueError(
f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
''' `v_prediction`''' )
# 4. Clip or threshold "predicted x_0"
if self.config.clip_sample:
SCREAMING_SNAKE_CASE__ = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
# 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
SCREAMING_SNAKE_CASE__ = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon
# 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
SCREAMING_SNAKE_CASE__ = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if not return_dict:
return (prev_sample, pred_original_sample)
return DDIMSchedulerOutput(prev_sample=__lowerCamelCase , pred_original_sample=__lowerCamelCase )
def __len__( self : List[str] ) -> Union[str, Any]:
return self.config.num_train_timesteps
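# A hedged, framework-agnostic sketch of the core of `step` above for the
# default "epsilon" prediction type: recover x0 from the current sample and the
# predicted noise, then re-noise toward the *next* (higher) timestep.
def _inverse_ddim_step_sketch(sample, model_output, alpha_prod_t, alpha_prod_t_next):
    pred_original_sample = (sample - (1 - alpha_prod_t) ** 0.5 * model_output) / alpha_prod_t ** 0.5
    pred_sample_direction = (1 - alpha_prod_t_next) ** 0.5 * model_output
    return alpha_prod_t_next ** 0.5 * pred_original_sample + pred_sample_direction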
| 493 | 1 |
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson ( func , a , precision = 1_0**-1_0 ):
    """simple docstring"""
    x = a
    while True:
        x = Decimal(x ) - (
            Decimal(eval(func ) ) / Decimal(eval(str(diff(func ) ) ) ) # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func ) ) < precision: # noqa: S307
            return float(x )
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
print(f"The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}")
    # Find root of log(x) = 1 (i.e. the number e)
print(f"The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}")
# Exponential Roots
print(f"The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}")
| 180 |
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''', ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''', ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''', [None, '''v2'''] )
def test_hf_hub_url( repo_id , path , revision ):
    """simple docstring"""
    url = hf_hub_url(repo_id=repo_id , path=path , revision=revision )
    assert url == f'''https://huggingface.co/datasets/{repo_id}/resolve/{revision or "main"}/{quote(path )}'''
| 180 | 1 |
"""simple docstring"""
from scipy.stats import pearsonr
import datasets
_DESCRIPTION = '''
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results[\'pearsonr\'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric("pearsonr")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
[\'p-value\', \'pearsonr\']
>>> print(round(results[\'pearsonr\'], 2))
-0.74
>>> print(round(results[\'p-value\'], 2))
0.15
'''
_CITATION = '''
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a ( datasets.Metric ):
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'] , )
    def _compute( self , predictions , references , return_pvalue=False ):
"""simple docstring"""
if return_pvalue:
            results = pearsonr(references , predictions )
            return {"pearsonr": results[0], "p-value": results[1]}
        else:
            return {"pearsonr": float(pearsonr(references , predictions )[0] )}
| 4 |
"""simple docstring"""
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']


def pytest_collection_modifyitems( config , items ) -> None:
    '''simple docstring'''
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"] ):
            continue
        item.add_marker(pytest.mark.unit )


def pytest_configure( config ) -> None:
    '''simple docstring'''
    config.addinivalue_line("markers" , "torchaudio_latest: mark test to run with torchaudio>=0.12" )


@pytest.fixture(autouse=True )
def set_test_cache_config( tmp_path_factory , monkeypatch ) -> None:
    '''simple docstring'''
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE" , str(test_hf_datasets_cache ) )
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE" , str(test_hf_metrics_cache ) )
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE" , str(test_hf_modules_cache ) )
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH" , str(test_downloaded_datasets_path ) )
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(test_extracted_datasets_path ) )


@pytest.fixture(autouse=True , scope="session" )
def disable_tqdm_output() -> None:
    '''simple docstring'''
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True )
def set_update_download_counts_to_false( monkeypatch ) -> None:
    '''simple docstring'''
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS" , False )


@pytest.fixture
def set_sqlalchemy_silence_uber_warning( monkeypatch ) -> None:
    '''simple docstring'''
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING" , True )
| 642 | 0 |
'''simple docstring'''
from pathlib import Path
from typing import List
from transformers import is_torch_available, is_vision_available
from transformers.testing_utils import get_tests_dir, is_tool_test
from transformers.tools.agent_types import AGENT_TYPE_MAPPING, AgentAudio, AgentImage, AgentText
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
__snake_case =["""text""", """image""", """audio"""]
def create_inputs( input_types : List[str] ):
    inputs = []
for input_type in input_types:
if input_type == "text":
inputs.append('Text input' )
elif input_type == "image":
inputs.append(
Image.open(Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png' ).resize((512, 512) ) )
elif input_type == "audio":
inputs.append(torch.ones(3000 ) )
        elif isinstance(input_type , list ):
            inputs.append(create_inputs(input_type ) )
else:
raise ValueError(f'''Invalid type requested: {input_type}''' )
return inputs
def output_types( outputs : List ):
    output_types = []
for output in outputs:
        if isinstance(output , (str, AgentText) ):
            output_types.append('text' )
        elif isinstance(output , (Image.Image, AgentImage) ):
            output_types.append('image' )
        elif isinstance(output , (torch.Tensor, AgentAudio) ):
output_types.append('audio' )
else:
raise ValueError(f'''Invalid output: {output}''' )
return output_types
@is_tool_test
class UpperCAmelCase_ :
def __UpperCAmelCase ( self : Optional[int] ) -> Dict:
self.assertTrue(hasattr(self.tool , 'inputs' ) )
self.assertTrue(hasattr(self.tool , 'outputs' ) )
lowerCAmelCase = self.tool.inputs
for _input in inputs:
if isinstance(_input , UpperCAmelCase__ ):
for __input in _input:
self.assertTrue(__input in authorized_types )
else:
self.assertTrue(_input in authorized_types )
lowerCAmelCase = self.tool.outputs
for _output in outputs:
self.assertTrue(_output in authorized_types )
def __UpperCAmelCase ( self : Dict ) -> Optional[Any]:
lowerCAmelCase = create_inputs(self.tool.inputs )
lowerCAmelCase = self.tool(*UpperCAmelCase__ )
# There is a single output
if len(self.tool.outputs ) == 1:
lowerCAmelCase = [outputs]
self.assertListEqual(output_types(UpperCAmelCase__ ) , self.tool.outputs )
def __UpperCAmelCase ( self : Optional[Any] ) -> str:
self.assertTrue(hasattr(self.tool , 'description' ) )
self.assertTrue(hasattr(self.tool , 'default_checkpoint' ) )
self.assertTrue(self.tool.description.startswith('This is a tool that' ) )
def __UpperCAmelCase ( self : Tuple ) -> Tuple:
lowerCAmelCase = create_inputs(self.tool.inputs )
lowerCAmelCase = self.tool(*UpperCAmelCase__ )
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase = [outputs]
self.assertEqual(len(UpperCAmelCase__ ) , len(self.tool.outputs ) )
for output, output_type in zip(UpperCAmelCase__ , self.tool.outputs ):
lowerCAmelCase = AGENT_TYPE_MAPPING[output_type]
self.assertTrue(isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) )
def __UpperCAmelCase ( self : str ) -> int:
lowerCAmelCase = create_inputs(self.tool.inputs )
lowerCAmelCase = []
for _input, input_type in zip(UpperCAmelCase__ , self.tool.inputs ):
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
_inputs.append([AGENT_TYPE_MAPPING[_input_type](_input ) for _input_type in input_type] )
else:
_inputs.append(AGENT_TYPE_MAPPING[input_type](_input ) )
# Should not raise an error
lowerCAmelCase = self.tool(*UpperCAmelCase__ )
if not isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
lowerCAmelCase = [outputs]
self.assertEqual(len(UpperCAmelCase__ ) , len(self.tool.outputs ) )
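# A hedged sketch of the contract this mixin exercises: a hypothetical tool
# declaring its input/output modalities and returning one output per declared
# type. EchoTextTool is illustrative, not a real transformers tool.
class EchoTextTool:
    inputs = ["text"]
    outputs = ["text"]
    description = "This is a tool that echoes its text input back."
    default_checkpoint = "hypothetical/echo-checkpoint"

    def __call__(self, text):
        return AgentText(text)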
| 513 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
__snake_case =logging.get_logger(__name__)
class BlipImageProcessor( BaseImageProcessor ):
lowerCamelCase : Union[str, Any] = ['''pixel_values''']
def __init__( self : int , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Dict[str, int] = None , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Union[int, float] = 1 / 2_5_5 , UpperCAmelCase__ : bool = True , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : bool = True , **UpperCAmelCase__ : Dict , ) -> None:
super().__init__(**UpperCAmelCase__ )
lowerCAmelCase = size if size is not None else {'height': 3_8_4, 'width': 3_8_4}
lowerCAmelCase = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
lowerCAmelCase = do_resize
lowerCAmelCase = size
lowerCAmelCase = resample
lowerCAmelCase = do_rescale
lowerCAmelCase = rescale_factor
lowerCAmelCase = do_normalize
lowerCAmelCase = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowerCAmelCase = image_std if image_std is not None else OPENAI_CLIP_STD
lowerCAmelCase = do_convert_rgb
def __UpperCAmelCase ( self : Any , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Dict[str, int] , UpperCAmelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Union[str, Any] , ) -> np.ndarray:
lowerCAmelCase = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' )
lowerCAmelCase = (size['height'], size['width'])
return resize(UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[int, float] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : Optional[int] , ) -> str:
return rescale(UpperCAmelCase__ , scale=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : str , UpperCAmelCase__ : np.ndarray , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Union[float, List[float]] , UpperCAmelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCAmelCase__ : List[Any] , ) -> np.ndarray:
return normalize(UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ , data_format=UpperCAmelCase__ , **UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[Any] , UpperCAmelCase__ : ImageInput , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[Dict[str, int]] = None , UpperCAmelCase__ : PILImageResampling = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[float] = None , UpperCAmelCase__ : Optional[bool] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[float, List[float]]] = None , UpperCAmelCase__ : Optional[Union[str, TensorType]] = None , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCAmelCase__ : int , ) -> PIL.Image.Image:
lowerCAmelCase = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase = resample if resample is not None else self.resample
lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase = image_std if image_std is not None else self.image_std
lowerCAmelCase = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCAmelCase = size if size is not None else self.size
lowerCAmelCase = get_size_dict(UpperCAmelCase__ , default_to_square=UpperCAmelCase__ )
lowerCAmelCase = make_list_of_images(UpperCAmelCase__ )
if not valid_images(UpperCAmelCase__ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCAmelCase = [convert_to_rgb(UpperCAmelCase__ ) for image in images]
# All transformations expect numpy arrays.
lowerCAmelCase = [to_numpy_array(UpperCAmelCase__ ) for image in images]
if do_resize:
lowerCAmelCase = [self.resize(image=UpperCAmelCase__ , size=UpperCAmelCase__ , resample=UpperCAmelCase__ ) for image in images]
if do_rescale:
lowerCAmelCase = [self.rescale(image=UpperCAmelCase__ , scale=UpperCAmelCase__ ) for image in images]
if do_normalize:
lowerCAmelCase = [self.normalize(image=UpperCAmelCase__ , mean=UpperCAmelCase__ , std=UpperCAmelCase__ ) for image in images]
lowerCAmelCase = [to_channel_dimension_format(UpperCAmelCase__ , UpperCAmelCase__ ) for image in images]
lowerCAmelCase = BatchFeature(data={'pixel_values': images} , tensor_type=UpperCAmelCase__ )
return encoded_outputs
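# A hedged, numpy-only sketch of the order of operations in `preprocess` above:
# rescale uint8-range pixels to [0, 1], then normalize with the CLIP mean/std
# constants imported at the top of this file. The 1x1 image is made up.
def _rescale_then_normalize_sketch():
    image = np.full((1, 1, 3), 255, dtype=np.float32)
    image = image * (1 / 255)  # rescale
    image = (image - np.array(OPENAI_CLIP_MEAN)) / np.array(OPENAI_CLIP_STD)  # normalize
    return image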
| 513 | 1 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand( ABC ):
@staticmethod
@abstractmethod
    def register_subcommand( parser : ArgumentParser ):
"""simple docstring"""
raise NotImplementedError()
@abstractmethod
    def run( self ):
"""simple docstring"""
raise NotImplementedError()
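# A hedged sketch of a concrete subcommand following the abstract interface
# above. The command name and behavior are hypothetical, and `parser` is
# assumed to be the sub-parsers action of the root ArgumentParser.
class EnvCommandSketch(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser):
        sub = parser.add_parser("env-sketch", help="Print a placeholder message.")
        sub.set_defaults(func=lambda args: EnvCommandSketch())

    def run(self):
        print("hypothetical environment info")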
| 269 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class _A ( PipelineTesterMixin , unittest.TestCase ):
lowercase_ : int = KandinskyVaaPipeline
lowercase_ : Union[str, Any] = [
'''image_embeds''',
'''negative_image_embeds''',
]
lowercase_ : int = ['''image_embeds''', '''negative_image_embeds''']
lowercase_ : str = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
lowercase_ : str = False
@property
def a ( self : int ):
"""simple docstring"""
return 32
@property
def a ( self : str ):
"""simple docstring"""
return 32
@property
def a ( self : Optional[int] ):
"""simple docstring"""
return self.time_input_dim
@property
def a ( self : Optional[int] ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def a ( self : Optional[Any] ):
"""simple docstring"""
return 1_00
@property
def a ( self : str ):
"""simple docstring"""
torch.manual_seed(0 )
__UpperCamelCase : int = {
"""in_channels""": 4,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
__UpperCamelCase : Any = UNetaDConditionModel(**lowerCamelCase__ )
return model
@property
def a ( self : Union[str, Any] ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a ( self : int ):
"""simple docstring"""
torch.manual_seed(0 )
__UpperCamelCase : Tuple = VQModel(**self.dummy_movq_kwargs )
return model
def a ( self : Tuple ):
"""simple docstring"""
__UpperCamelCase : str = self.dummy_unet
__UpperCamelCase : Optional[int] = self.dummy_movq
__UpperCamelCase : List[str] = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=lowerCamelCase__ , set_alpha_to_one=lowerCamelCase__ , steps_offset=1 , prediction_type="""epsilon""" , thresholding=lowerCamelCase__ , )
__UpperCamelCase : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def a ( self : Optional[int] , lowerCamelCase__ : Optional[Any] , lowerCamelCase__ : Any=0 ):
"""simple docstring"""
__UpperCamelCase : Tuple = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCamelCase__ ) ).to(lowerCamelCase__ )
__UpperCamelCase : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
lowerCamelCase__ )
if str(lowerCamelCase__ ).startswith("""mps""" ):
__UpperCamelCase : Optional[int] = torch.manual_seed(lowerCamelCase__ )
else:
__UpperCamelCase : int = torch.Generator(device=lowerCamelCase__ ).manual_seed(lowerCamelCase__ )
__UpperCamelCase : Dict = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def a ( self : Dict ):
"""simple docstring"""
__UpperCamelCase : Dict = """cpu"""
__UpperCamelCase : Dict = self.get_dummy_components()
__UpperCamelCase : Union[str, Any] = self.pipeline_class(**lowerCamelCase__ )
__UpperCamelCase : int = pipe.to(lowerCamelCase__ )
pipe.set_progress_bar_config(disable=lowerCamelCase__ )
__UpperCamelCase : List[Any] = pipe(**self.get_dummy_inputs(lowerCamelCase__ ) )
__UpperCamelCase : List[Any] = output.images
__UpperCamelCase : int = pipe(
**self.get_dummy_inputs(lowerCamelCase__ ) , return_dict=lowerCamelCase__ , )[0]
__UpperCamelCase : List[str] = image[0, -3:, -3:, -1]
__UpperCamelCase : Tuple = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__UpperCamelCase : int = np.array(
[0.623_7976, 1.0, 0.3644_1332, 1.0, 0.7063_9634, 0.2987_7186, 0.8565_2125, 0.521_6843, 0.5445_4046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class _A ( unittest.TestCase ):
def a ( self : Optional[int] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a ( self : List[Any] ):
"""simple docstring"""
        expected_image = load_numpy(
            """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
            """/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy""" )
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
        pipe_prior.to(torch_device )
        pipeline = KandinskyVaaPipeline.from_pretrained(
            """kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        prompt = """red cat, 4k photo"""
        generator = torch.Generator(device="""cuda""" ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
        generator = torch.Generator(device="""cuda""" ).manual_seed(0 )
        output = pipeline(
            image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=1_00 , output_type="""np""" , )
        image = output.images[0]
        assert image.shape == (5_12, 5_12, 3)
        assert_mean_pixel_difference(image , expected_image )
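# Hedged sketch (not from this file): `assert_mean_pixel_difference` is imported from the
# shared pipeline test utilities; a minimal equivalent of such a helper looks roughly like this.
def _mean_pixel_difference_sketch(image, expected_image, threshold=10.0):
    image = np.asarray(image, dtype=np.float32)
    expected_image = np.asarray(expected_image, dtype=np.float32)
    avg_diff = np.abs(image - expected_image).mean()
    assert avg_diff < threshold, f"Images deviate by {avg_diff} pixels on average"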
| 269 | 1 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class _snake_case :
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=True , _lowerCamelCase=99 , _lowerCamelCase=64 , _lowerCamelCase=5 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase="gelu" , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=512 , _lowerCamelCase=16 , _lowerCamelCase=2 , _lowerCamelCase=0.02 , _lowerCamelCase=3 , _lowerCamelCase=4 , _lowerCamelCase=None , ):
UpperCAmelCase__ : Any = parent
UpperCAmelCase__ : List[str] = batch_size
UpperCAmelCase__ : Any = seq_length
UpperCAmelCase__ : int = is_training
UpperCAmelCase__ : List[str] = use_input_mask
UpperCAmelCase__ : Dict = use_token_type_ids
UpperCAmelCase__ : List[str] = use_labels
UpperCAmelCase__ : Dict = vocab_size
UpperCAmelCase__ : List[str] = hidden_size
UpperCAmelCase__ : Dict = num_hidden_layers
UpperCAmelCase__ : Optional[int] = num_attention_heads
UpperCAmelCase__ : List[Any] = intermediate_size
UpperCAmelCase__ : Union[str, Any] = hidden_act
UpperCAmelCase__ : List[Any] = hidden_dropout_prob
UpperCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase__ : int = max_position_embeddings
UpperCAmelCase__ : Any = type_vocab_size
UpperCAmelCase__ : Optional[Any] = type_sequence_label_size
UpperCAmelCase__ : int = initializer_range
UpperCAmelCase__ : Optional[Any] = num_labels
UpperCAmelCase__ : str = num_choices
UpperCAmelCase__ : Optional[int] = scope
UpperCAmelCase__ : List[Any] = vocab_size - 1
def snake_case__ ( self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
        config = self.get_config()
return config, input_ids, input_mask, token_labels
def snake_case__ ( self):
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_lowerCamelCase , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def snake_case__ ( self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
return config, input_ids, input_mask, token_labels
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : Tuple = GPTNeoXModel(config=_lowerCamelCase)
model.to(_lowerCamelCase)
model.eval()
UpperCAmelCase__ : Optional[int] = model(_lowerCamelCase , attention_mask=_lowerCamelCase)
UpperCAmelCase__ : List[str] = model(_lowerCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : int = True
UpperCAmelCase__ : List[str] = GPTNeoXModel(_lowerCamelCase)
model.to(_lowerCamelCase)
model.eval()
UpperCAmelCase__ : int = model(_lowerCamelCase , attention_mask=_lowerCamelCase)
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : int = GPTNeoXForCausalLM(config=_lowerCamelCase)
model.to(_lowerCamelCase)
model.eval()
UpperCAmelCase__ : str = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : int = self.num_labels
UpperCAmelCase__ : int = GPTNeoXForQuestionAnswering(_lowerCamelCase)
model.to(_lowerCamelCase)
model.eval()
UpperCAmelCase__ : List[str] = model(_lowerCamelCase , attention_mask=_lowerCamelCase)
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : Dict = self.num_labels
UpperCAmelCase__ : Any = GPTNeoXForSequenceClassification(_lowerCamelCase)
model.to(_lowerCamelCase)
model.eval()
UpperCAmelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size)
UpperCAmelCase__ : Union[str, Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : List[Any] = self.num_labels
UpperCAmelCase__ : Tuple = GPTNeoXForTokenClassification(_lowerCamelCase)
model.to(_lowerCamelCase)
model.eval()
UpperCAmelCase__ : Tuple = model(_lowerCamelCase , attention_mask=_lowerCamelCase , labels=_lowerCamelCase)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))
def snake_case__ ( self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : str = GPTNeoXForCausalLM(config=_lowerCamelCase)
model.to(_lowerCamelCase)
model.eval()
# first forward pass
UpperCAmelCase__ : Dict = model(_lowerCamelCase , attention_mask=_lowerCamelCase , use_cache=_lowerCamelCase)
UpperCAmelCase__ : str = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
UpperCAmelCase__ : int = ids_tensor((self.batch_size, 3) , config.vocab_size)
UpperCAmelCase__ : Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2)
        # append to next input_ids and attention mask
UpperCAmelCase__ : str = torch.cat([input_ids, next_tokens] , dim=-1)
UpperCAmelCase__ : List[str] = torch.cat([input_mask, next_mask] , dim=-1)
UpperCAmelCase__ : Optional[int] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , output_hidden_states=_lowerCamelCase)
UpperCAmelCase__ : List[Any] = output_from_no_past["""hidden_states"""][0]
UpperCAmelCase__ : Optional[Any] = model(
_lowerCamelCase , attention_mask=_lowerCamelCase , past_key_values=_lowerCamelCase , output_hidden_states=_lowerCamelCase , )["""hidden_states"""][0]
# select random slice
UpperCAmelCase__ : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1]).item()
UpperCAmelCase__ : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase__ : List[str] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowerCamelCase , _lowerCamelCase , atol=1e-3))
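        # In other words: decoding the 3 appended tokens with the cached past_key_values must
        # reproduce the hidden states obtained by re-running the full concatenated sequence.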
def snake_case__ ( self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class _snake_case ( a__ , a__ , a__ , unittest.TestCase ):
lowerCAmelCase :Dict = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase :Tuple = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCAmelCase :List[str] = (
{
'''feature-extraction''': GPTNeoXModel,
'''question-answering''': GPTNeoXForQuestionAnswering,
'''text-classification''': GPTNeoXForSequenceClassification,
'''text-generation''': GPTNeoXForCausalLM,
'''token-classification''': GPTNeoXForTokenClassification,
'''zero-shot''': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase :List[str] = False
lowerCAmelCase :str = False
lowerCAmelCase :Dict = False
lowerCAmelCase :Optional[int] = False
def snake_case__ ( self):
UpperCAmelCase__ : Optional[int] = GPTNeoXModelTester(self)
UpperCAmelCase__ : Optional[Any] = ConfigTester(self , config_class=_lowerCamelCase , hidden_size=64 , num_attention_heads=8)
def snake_case__ ( self):
self.config_tester.run_common_tests()
def snake_case__ ( self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config , input_ids , input_mask)
def snake_case__ ( self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config , input_ids , input_mask)
def snake_case__ ( self):
# This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config , input_ids , input_mask)
def snake_case__ ( self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config , input_ids , input_mask)
def snake_case__ ( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
def snake_case__ ( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
def snake_case__ ( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)
def snake_case__ ( self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@unittest.skip(reason="""Feed forward chunking is not implemented""")
def snake_case__ ( self):
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)])
    def snake_case__ ( self , scaling_type):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 10] , config.vocab_size)
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5)] , config.vocab_size)
        set_seed(42) # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config)
        original_model.to(torch_device)
        original_model.eval()
        original_short_output = original_model(short_input).last_hidden_state
        original_long_output = original_model(long_input).last_hidden_state
        set_seed(42) # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {"""type""": scaling_type, """factor""": 10.0}
        scaled_model = GPTNeoXModel(config)
        scaled_model.to(torch_device)
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input).last_hidden_state
        scaled_long_output = scaled_model(long_input).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1e-5))
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1e-5))
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1e-5))
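# Hedged illustration of the "linear" scaling mode exercised above (an assumption about the
# underlying rotary-embedding math, not code from this test file): positions are simply
# divided by the scaling factor before the rotary angles are computed, stretching the
# usable context window by that factor.
def _linear_scaled_position_sketch(position: int, scaling_factor: float = 10.0) -> float:
    return position / scaling_factor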
@require_torch
class _snake_case ( unittest.TestCase ):
@slow
def snake_case__ ( self):
        tokenizer = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""")
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""")
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device)
            inputs = tokenizer("""My favorite food is""" , return_tensors="""pt""").to(torch_device)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""
            output_ids = model.generate(**inputs , do_sample=False , max_new_tokens=20)
            output_str = tokenizer.batch_decode(output_ids)[0]
            self.assertEqual(output_str , expected_output) | 113 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_encodec'] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 113 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__magic_name__ = logging.get_logger(__name__)
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : int = ['''pixel_values''']
def __init__( self , a_ = True , a_ = 32 , a_=PILImageResampling.BILINEAR , a_ = True , **a_ , ) -> None:
_UpperCAmelCase = do_resize
_UpperCAmelCase = do_rescale
_UpperCAmelCase = size_divisor
_UpperCAmelCase = resample
super().__init__(**a_ )
    def resize( self , image , size_divisor , resample , data_format = None , **kwargs ) -> np.ndarray:
        height, width = get_image_size(image )
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image , (new_h, new_w) , resample=resample , data_format=data_format , **kwargs )
        return image
    def rescale( self , image , scale , data_format = None , **kwargs ) -> np.ndarray:
        return rescale(image=image , scale=scale , data_format=data_format , **kwargs )
def _a ( self , a_ , a_ = None , a_ = None , a_=None , a_ = None , a_ = None , a_ = ChannelDimension.FIRST , **a_ , ) -> BatchFeature:
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = size_divisor if size_divisor is not None else self.size_divisor
_UpperCAmelCase = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError("size_divisor is required for resizing" )
_UpperCAmelCase = make_list_of_images(a_ )
if not valid_images(a_ ):
raise ValueError("Invalid image(s)" )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(a_ ) for img in images]
if do_resize:
_UpperCAmelCase = [self.resize(a_ , size_divisor=a_ , resample=a_ ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(a_ , scale=1 / 255 ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(a_ , a_ ) for image in images]
_UpperCAmelCase = {"pixel_values": images}
return BatchFeature(data=a_ , tensor_type=a_ )
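# Worked example of the `size_divisor` rounding performed in `resize` above
# (illustrative numbers, not taken from the tests):
def _round_down_to_multiple(value: int, divisor: int) -> int:
    # Same arithmetic as `height // size_divisor * size_divisor`.
    return value // divisor * divisor
assert _round_down_to_multiple(250, 32) == 224
assert _round_down_to_multiple(520, 32) == 512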
| 657 |
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
class _lowerCAmelCase ( lowerCamelCase ):
lowercase_ : Optional[Any] = '''upernet'''
def __init__( self , a_=None , a_=512 , a_=0.02 , a_=[1, 2, 3, 6] , a_=True , a_=0.4 , a_=384 , a_=256 , a_=1 , a_=False , a_=255 , **a_ , ) -> List[Any]:
super().__init__(**a_ )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_UpperCAmelCase = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(a_ , a_ ):
_UpperCAmelCase = backbone_config.get("model_type" )
_UpperCAmelCase = CONFIG_MAPPING[backbone_model_type]
_UpperCAmelCase = config_class.from_dict(a_ )
_UpperCAmelCase = backbone_config
_UpperCAmelCase = hidden_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = pool_scales
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_in_channels
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = loss_ignore_index
    def to_dict( self ) -> dict:
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
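    # Usage sketch (illustrative, under the assumption that this mirrors the canonical
    # UperNetConfig API): serializing nests the backbone config as a plain dict, e.g.
    # `config.to_dict()["backbone_config"]["model_type"] == "resnet"` for the default backbone.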
| 657 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_perceiver': ['PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'PerceiverConfig', 'PerceiverOnnxConfig'],
'tokenization_perceiver': ['PerceiverTokenizer'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_perceiver'] = ['PerceiverFeatureExtractor']
    _import_structure['image_processing_perceiver'] = ['PerceiverImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_perceiver'] = [
'PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST',
'PerceiverForImageClassificationConvProcessing',
'PerceiverForImageClassificationFourier',
'PerceiverForImageClassificationLearned',
'PerceiverForMaskedLM',
'PerceiverForMultimodalAutoencoding',
'PerceiverForOpticalFlow',
'PerceiverForSequenceClassification',
'PerceiverLayer',
'PerceiverModel',
'PerceiverPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
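    # With this lazy pattern, `from transformers.models.perceiver import PerceiverModel`
    # defers the heavy torch-backed import until the attribute is first accessed.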
| 122 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
lowerCAmelCase_ = {
'return_dict': False,
'output_hidden_states': True,
'output_attentions': True,
'torchscript': True,
'torch_dtype': 'float16',
'use_bfloat16': True,
'tf_legacy_loss': True,
'pruned_heads': {'a': 1},
'tie_word_embeddings': False,
'is_decoder': True,
'cross_attention_hidden_size': 128,
'add_cross_attention': True,
'tie_encoder_decoder': True,
'max_length': 50,
'min_length': 3,
'do_sample': True,
'early_stopping': True,
'num_beams': 3,
'num_beam_groups': 3,
'diversity_penalty': 0.5,
'temperature': 2.0,
'top_k': 10,
'top_p': 0.7,
'typical_p': 0.2,
'repetition_penalty': 0.8,
'length_penalty': 0.8,
'no_repeat_ngram_size': 5,
'encoder_no_repeat_ngram_size': 5,
'bad_words_ids': [1, 2, 3],
'num_return_sequences': 3,
'chunk_size_feed_forward': 5,
'output_scores': True,
'return_dict_in_generate': True,
'forced_bos_token_id': 2,
'forced_eos_token_id': 3,
'remove_invalid_values': True,
'architectures': ['BertModel'],
'finetuning_task': 'translation',
'id2label': {0: 'label'},
'label2id': {'label': '0'},
'tokenizer_class': 'BertTokenizerFast',
'prefix': 'prefix',
'bos_token_id': 6,
'pad_token_id': 7,
'eos_token_id': 8,
'sep_token_id': 9,
'decoder_start_token_id': 10,
'exponential_decay_length_penalty': (5, 1.0_1),
'suppress_tokens': [0, 1],
'begin_suppress_tokens': 2,
'task_specific_params': {'translation': 'some_params'},
'problem_type': 'regression',
}
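# Every value above deliberately differs from the corresponding PretrainedConfig default;
# the test further below asserts this, so a new common kwarg cannot silently shadow a default.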
@is_staging_test
class __A ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCAmelCase ( cls : List[str] ) -> Union[str, Any]:
"""simple docstring"""
        cls._token = TOKEN
        HfFolder.save_token(TOKEN )
@classmethod
def UpperCAmelCase ( cls : List[str] ) -> str:
"""simple docstring"""
try:
delete_repo(token=cls._token ,repo_id='''test-config''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='''valid_org/test-config-org''' )
except HTTPError:
pass
try:
delete_repo(token=cls._token ,repo_id='''test-dynamic-config''' )
except HTTPError:
pass
def UpperCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
        config = BertConfig(
            vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
        config.push_to_hub('''test-config''' ,use_auth_token=self._token )
        new_config = BertConfig.from_pretrained(f"""{USER}/test-config""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v ,getattr(new_config ,k ) )
        # Reset repo
        delete_repo(token=self._token ,repo_id='''test-config''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir ,repo_id='''test-config''' ,push_to_hub=True ,use_auth_token=self._token )
        new_config = BertConfig.from_pretrained(f"""{USER}/test-config""" )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v ,getattr(new_config ,k ) )
def UpperCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
        config = BertConfig(
            vocab_size=99 ,hidden_size=32 ,num_hidden_layers=5 ,num_attention_heads=4 ,intermediate_size=37 )
        config.push_to_hub('''valid_org/test-config-org''' ,use_auth_token=self._token )
        new_config = BertConfig.from_pretrained('''valid_org/test-config-org''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v ,getattr(new_config ,k ) )
        # Reset repo
        delete_repo(token=self._token ,repo_id='''valid_org/test-config-org''' )
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir ,repo_id='''valid_org/test-config-org''' ,push_to_hub=True ,use_auth_token=self._token )
        new_config = BertConfig.from_pretrained('''valid_org/test-config-org''' )
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v ,getattr(new_config ,k ) )
def UpperCAmelCase ( self : int ) -> Union[str, Any]:
"""simple docstring"""
CustomConfig.register_for_auto_class()
        config = CustomConfig(attribute=42 )
config.push_to_hub('''test-dynamic-config''' ,use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map ,{'''AutoConfig''': '''custom_configuration.CustomConfig'''} )
        new_config = AutoConfig.from_pretrained(f"""{USER}/test-dynamic-config""" ,trust_remote_code=True )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__ ,'''CustomConfig''' )
self.assertEqual(new_config.attribute ,42 )
class __A ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
        c = GPTaConfig()
        # attempt to modify each of int/float/bool/str config records and verify they were updated
        n_embd = c.n_embd + 1  # int
        resid_pdrop = c.resid_pdrop + 1.0  # float
        scale_attn_weights = not c.scale_attn_weights  # bool
        summary_type = c.summary_type + '''foo'''  # str
        c.update_from_string(
            f"""n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}""" )
        self.assertEqual(n_embd ,c.n_embd ,'''mismatch for key: n_embd''' )
        self.assertEqual(resid_pdrop ,c.resid_pdrop ,'''mismatch for key: resid_pdrop''' )
        self.assertEqual(scale_attn_weights ,c.scale_attn_weights ,'''mismatch for key: scale_attn_weights''' )
        self.assertEqual(summary_type ,c.summary_type ,'''mismatch for key: summary_type''' )
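        # The update string is a comma-separated list of `key=value` pairs, e.g.
        # "n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index".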
def UpperCAmelCase ( self : List[str] ) -> str:
"""simple docstring"""
        base_config = PretrainedConfig()
        missing_keys = [key for key in base_config.__dict__ if key not in config_common_kwargs]
        # If this part of the test fails, you have arguments to add in config_common_kwargs above.
        self.assertListEqual(
            missing_keys ,['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] )
        keys_with_defaults = [key for key, value in config_common_kwargs.items() if value == getattr(base_config ,key )]
        if len(keys_with_defaults ) > 0:
            raise ValueError(
                '''The following keys are set with the default values in'''
                ''' `test_configuration_common.config_common_kwargs` pick another value for them:'''
                f""" {", ".join(keys_with_defaults )}.""" )
def UpperCAmelCase ( self : Union[str, Any] ) -> str:
"""simple docstring"""
        with self.assertRaises(OSError ):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' )
        config = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' ,subfolder='''bert''' )
        self.assertIsNotNone(config )
def UpperCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}
        # Download this model to make sure it's in the cache.
        _ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''' ,return_value=response_mock ) as mock_head:
            _ = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
            # This checks that we did call the fake head request
            mock_head.assert_called()
def UpperCAmelCase ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
lowercase__ : List[Any] = BertConfig.from_pretrained(
'''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' )
def UpperCAmelCase ( self : List[str] ) -> List[str]:
"""simple docstring"""
        configuration = AutoConfig.from_pretrained('''bert-base-cased''' )
        configuration.configuration_files = ['''config.4.0.0.json''']
        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(tmp_dir )
            configuration.hidden_size = 2
            json.dump(configuration.to_dict() ,open(os.path.join(tmp_dir ,'''config.4.0.0.json''' ) ,'''w''' ) )
            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            new_configuration = AutoConfig.from_pretrained(tmp_dir )
            self.assertEqual(new_configuration.hidden_size ,2 )
            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            configuration.configuration_files = ['''config.42.0.0.json''']
            configuration.hidden_size = 768
            configuration.save_pretrained(tmp_dir )
            shutil.move(os.path.join(tmp_dir ,'''config.4.0.0.json''' ) ,os.path.join(tmp_dir ,'''config.42.0.0.json''' ) )
            new_configuration = AutoConfig.from_pretrained(tmp_dir )
            self.assertEqual(new_configuration.hidden_size ,768 )
def UpperCAmelCase ( self : Tuple ) -> Any:
"""simple docstring"""
        repo = '''hf-internal-testing/test-two-configs'''
        import transformers as new_transformers
        new_transformers.configuration_utils.__version__ = '''v4.0.0'''
        new_configuration , kwargs = new_transformers.models.auto.AutoConfig.from_pretrained(
            repo ,return_unused_kwargs=True )
        self.assertEqual(new_configuration.hidden_size ,2 )
        # This checks that `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(kwargs ,{} )
        # Testing an older version by monkey-patching the version in the module where it's used.
        import transformers as old_transformers
        old_transformers.configuration_utils.__version__ = '''v3.0.0'''
        old_configuration = old_transformers.models.auto.AutoConfig.from_pretrained(repo )
self.assertEqual(old_configuration.hidden_size ,768 )
| 122 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    parser = ArgumentParser("""Transformers CLI tool""" ,usage="""transformers-cli <command> [<args>]""" )
    commands_parser = parser.add_subparsers(help="""transformers-cli command helpers""" )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args ,"""func""" ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
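# Example shell usage (illustrative):
#   transformers-cli env        # dispatches to EnvironmentCommand
#   transformers-cli download gpt2   # dispatches to DownloadCommand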
| 694 |
'''simple docstring'''
def solution(n: int = 4000000) -> int:
    # Sum the even-valued Fibonacci terms that do not exceed ``n``.
    even_fibs = []
    a , b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b )
        a , b = b, a + b
    return sum(even_fibs )
if __name__ == "__main__":
print(F"{solution() = }")
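    # Sanity checks (illustrative): the even Fibonacci terms up to 100 are 2, 8 and 34.
    assert solution(100) == 44
    assert solution() == 4613732  # the well-known answer for the four-million limit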
| 694 | 1 |
'''simple docstring'''
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class a :
"""simple docstring"""
def __init__( self : Dict , snake_case_ : Tuple , snake_case_ : List[str]=1_3 , snake_case_ : Dict=7 , snake_case_ : int=True , snake_case_ : Tuple=True , snake_case_ : int=True , snake_case_ : Any=True , snake_case_ : Union[str, Any]=9_9 , snake_case_ : List[Any]=2_4 , snake_case_ : Optional[Any]=2 , snake_case_ : Union[str, Any]=6 , snake_case_ : Any=3_7 , snake_case_ : Any="gelu" , snake_case_ : Dict=0.1 , snake_case_ : List[str]=0.1 , snake_case_ : Dict=5_1_2 , snake_case_ : Any=1_6 , snake_case_ : Optional[int]=2 , snake_case_ : Union[str, Any]=0.0_2 , snake_case_ : int=3 , snake_case_ : int=None , snake_case_ : Union[str, Any]=1_0_0_0 , ):
'''simple docstring'''
snake_case__ : Dict = parent
snake_case__ : Optional[int] = batch_size
snake_case__ : List[str] = seq_length
snake_case__ : List[Any] = is_training
snake_case__ : Optional[Any] = use_input_mask
snake_case__ : Optional[Any] = use_token_type_ids
snake_case__ : Tuple = use_labels
snake_case__ : List[Any] = vocab_size
snake_case__ : Union[str, Any] = hidden_size
snake_case__ : Optional[int] = num_hidden_layers
snake_case__ : Any = num_attention_heads
snake_case__ : str = intermediate_size
snake_case__ : Optional[int] = hidden_act
snake_case__ : List[Any] = hidden_dropout_prob
snake_case__ : Union[str, Any] = attention_probs_dropout_prob
snake_case__ : Dict = max_position_embeddings
snake_case__ : int = type_vocab_size
snake_case__ : Tuple = type_sequence_label_size
snake_case__ : Optional[Any] = initializer_range
snake_case__ : Union[str, Any] = num_labels
snake_case__ : Dict = scope
snake_case__ : Tuple = range_bbox
def __magic_name__ ( self : Union[str, Any] ):
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        bbox = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
        # Ensure that bbox is legal
        for i in range(bbox.shape[0] ):
            for j in range(bbox.shape[1] ):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t
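        # e.g. a random box (7, 9, 3, 2) becomes (3, 2, 7, 9) after the two swaps above,
        # so every box satisfies x0 <= x1 and y0 <= y1 as the layout embedding expects.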
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def __magic_name__ ( self : Optional[int] , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : str , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] , snake_case_ : Tuple , ):
'''simple docstring'''
snake_case__ : Optional[Any] = LiltModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case__ : Dict = model(snake_case_ , bbox=snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ )
snake_case__ : Optional[Any] = model(snake_case_ , bbox=snake_case_ , token_type_ids=snake_case_ )
snake_case__ : Union[str, Any] = model(snake_case_ , bbox=snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __magic_name__ ( self : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : List[Any] , snake_case_ : Any , snake_case_ : int , snake_case_ : int , snake_case_ : Tuple , ):
'''simple docstring'''
snake_case__ : List[Any] = self.num_labels
snake_case__ : str = LiltForTokenClassification(config=snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case__ : List[Any] = model(
snake_case_ , bbox=snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self : Dict , snake_case_ : int , snake_case_ : str , snake_case_ : Union[str, Any] , snake_case_ : Any , snake_case_ : Union[str, Any] , snake_case_ : Tuple , snake_case_ : int , ):
'''simple docstring'''
snake_case__ : Optional[int] = LiltForQuestionAnswering(config=snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case__ : Optional[int] = model(
snake_case_ , bbox=snake_case_ , attention_mask=snake_case_ , token_type_ids=snake_case_ , start_positions=snake_case_ , end_positions=snake_case_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self : Union[str, Any] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_torch
class a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
__UpperCAmelCase = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
def __magic_name__ ( self : List[str] , snake_case_ : Union[str, Any] , snake_case_ : Dict , snake_case_ : Dict , snake_case_ : str , snake_case_ : Any ):
'''simple docstring'''
return True
def __magic_name__ ( self : Dict ):
'''simple docstring'''
snake_case__ : List[Any] = LiltModelTester(self )
snake_case__ : Dict = ConfigTester(self , config_class=snake_case_ , hidden_size=3_7 )
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
@slow
def __magic_name__ ( self : str ):
'''simple docstring'''
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : List[str] = LiltModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
@require_torch
@slow
class a ( unittest.TestCase ):
"""simple docstring"""
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
        model = LiltModel.from_pretrained('''SCUT-DLVCLab/lilt-roberta-en-base''' ).to(torch_device )
        input_ids = torch.tensor([[1, 2]] , device=torch_device )
        bbox = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(input_ids=input_ids , bbox=bbox )
        expected_shape = torch.Size([1, 2, 7_6_8] )
        expected_slice = torch.tensor(
            [[-0.0_6_5_3, 0.0_9_5_0, -0.0_0_6_1], [-0.0_5_4_5, 0.0_9_2_6, -0.0_3_2_4]] , device=torch_device , )
        self.assertTrue(outputs.last_hidden_state.shape , expected_shape )
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , expected_slice , atol=1e-3 ) )
| 709 |
'''simple docstring'''
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class a :
"""simple docstring"""
def __init__( self : Any , snake_case_ : str , snake_case_ : Optional[Any]=1_3 , snake_case_ : int=7 , snake_case_ : int=True , snake_case_ : Optional[Any]=True , snake_case_ : Dict=True , snake_case_ : int=True , snake_case_ : Optional[Any]=9_9 , snake_case_ : int=6_4 , snake_case_ : Dict=5 , snake_case_ : List[Any]=4 , snake_case_ : Union[str, Any]=3_7 , snake_case_ : Dict="gelu" , snake_case_ : Union[str, Any]=0.1 , snake_case_ : Dict=0.1 , snake_case_ : Any=5_1_2 , snake_case_ : Any=1_6 , snake_case_ : Any=2 , snake_case_ : Dict=0.0_2 , snake_case_ : List[str]=3 , snake_case_ : Optional[int]=4 , snake_case_ : str=None , ):
'''simple docstring'''
snake_case__ : List[Any] = parent
snake_case__ : int = batch_size
snake_case__ : Dict = seq_length
snake_case__ : int = is_training
snake_case__ : Optional[Any] = use_input_mask
snake_case__ : Optional[Any] = use_token_type_ids
snake_case__ : Dict = use_labels
snake_case__ : int = vocab_size
snake_case__ : Any = hidden_size
snake_case__ : int = num_hidden_layers
snake_case__ : Union[str, Any] = num_attention_heads
snake_case__ : List[Any] = intermediate_size
snake_case__ : int = hidden_act
snake_case__ : int = hidden_dropout_prob
snake_case__ : List[Any] = attention_probs_dropout_prob
snake_case__ : Optional[int] = max_position_embeddings
snake_case__ : Optional[int] = type_vocab_size
snake_case__ : Any = type_sequence_label_size
snake_case__ : str = initializer_range
snake_case__ : List[str] = num_labels
snake_case__ : Dict = num_choices
snake_case__ : Union[str, Any] = scope
snake_case__ : List[Any] = vocab_size - 1
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        config = self.get_config()
return config, input_ids, input_mask, token_labels
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
return config, input_ids, input_mask, token_labels
def __magic_name__ ( self : Tuple , snake_case_ : Any , snake_case_ : str , snake_case_ : str ):
'''simple docstring'''
snake_case__ : Any = GPTNeoXModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case__ : Union[str, Any] = model(snake_case_ , attention_mask=snake_case_ )
snake_case__ : int = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : List[Any] , snake_case_ : Any , snake_case_ : List[Any] , snake_case_ : Optional[int] ):
'''simple docstring'''
snake_case__ : Union[str, Any] = True
snake_case__ : Tuple = GPTNeoXModel(snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case__ : Dict = model(snake_case_ , attention_mask=snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __magic_name__ ( self : Any , snake_case_ : str , snake_case_ : Tuple , snake_case_ : Optional[int] , snake_case_ : List[Any] ):
'''simple docstring'''
snake_case__ : Union[str, Any] = GPTNeoXForCausalLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case__ : Tuple = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __magic_name__ ( self : Optional[int] , snake_case_ : str , snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : List[str] ):
'''simple docstring'''
snake_case__ : Dict = self.num_labels
snake_case__ : List[Any] = GPTNeoXForQuestionAnswering(snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case__ : Any = model(snake_case_ , attention_mask=snake_case_ )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __magic_name__ ( self : List[Any] , snake_case_ : List[Any] , snake_case_ : Union[str, Any] , snake_case_ : List[Any] , snake_case_ : Dict ):
'''simple docstring'''
snake_case__ : str = self.num_labels
snake_case__ : List[str] = GPTNeoXForSequenceClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : str = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : Union[str, Any] , snake_case_ : Optional[Any] , snake_case_ : int , snake_case_ : Dict , snake_case_ : Optional[int] ):
'''simple docstring'''
snake_case__ : Any = self.num_labels
snake_case__ : Any = GPTNeoXForTokenClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
snake_case__ : Optional[Any] = model(snake_case_ , attention_mask=snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __magic_name__ ( self : Union[str, Any] , snake_case_ : int , snake_case_ : Union[str, Any] , snake_case_ : Tuple ):
'''simple docstring'''
snake_case__ : Optional[Any] = True
snake_case__ : Union[str, Any] = GPTNeoXForCausalLM(config=snake_case_ )
model.to(snake_case_ )
model.eval()
# first forward pass
snake_case__ : int = model(snake_case_ , attention_mask=snake_case_ , use_cache=snake_case_ )
snake_case__ : Dict = outputs.past_key_values
        # create hypothetical multiple next tokens and extend to next_input_ids
snake_case__ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ : Union[str, Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and attention mask
snake_case__ : Dict = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case__ : Optional[int] = torch.cat([input_mask, next_mask] , dim=-1 )
snake_case__ : Optional[Any] = model(snake_case_ , attention_mask=snake_case_ , output_hidden_states=snake_case_ )
snake_case__ : Union[str, Any] = output_from_no_past['''hidden_states'''][0]
snake_case__ : str = model(
snake_case_ , attention_mask=snake_case_ , past_key_values=snake_case_ , output_hidden_states=snake_case_ , )['''hidden_states'''][0]
# select random slice
snake_case__ : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case__ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case__ : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(snake_case_ , snake_case_ , atol=1e-3 ) )
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask, token_labels = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class a ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
"""simple docstring"""
__UpperCAmelCase = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
__UpperCAmelCase = (GPTNeoXForCausalLM,) if is_torch_available() else ()
__UpperCAmelCase = (
{
"""feature-extraction""": GPTNeoXModel,
"""question-answering""": GPTNeoXForQuestionAnswering,
"""text-classification""": GPTNeoXForSequenceClassification,
"""text-generation""": GPTNeoXForCausalLM,
"""token-classification""": GPTNeoXForTokenClassification,
"""zero-shot""": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
__UpperCAmelCase = False
def __magic_name__ ( self : int ):
'''simple docstring'''
snake_case__ : Optional[int] = GPTNeoXModelTester(self )
snake_case__ : List[str] = ConfigTester(self , config_class=snake_case_ , hidden_size=6_4 , num_attention_heads=8 )
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def __magic_name__ ( self : Dict ):
'''simple docstring'''
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config , input_ids , input_mask )
def __magic_name__ ( self : Dict ):
'''simple docstring'''
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config , input_ids , input_mask )
def __magic_name__ ( self : Union[str, Any] ):
'''simple docstring'''
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config , input_ids , input_mask )
def __magic_name__ ( self : Optional[int] ):
'''simple docstring'''
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config , input_ids , input_mask )
def __magic_name__ ( self : int ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs )
def __magic_name__ ( self : List[str] ):
'''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
def __magic_name__ ( self : Any ):
'''simple docstring'''
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case_ )
def __magic_name__ ( self : List[Any] ):
'''simple docstring'''
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case_ )
@unittest.skip(reason='''Feed forward chunking is not implemented''' )
def __magic_name__ ( self : Optional[Any] ):
'''simple docstring'''
pass
@parameterized.expand([('''linear''',), ('''dynamic''',)] )
    def test_model_rope_scaling( self , scaling_type ):
        '''simple docstring'''
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        short_input = ids_tensor([1, 1_0] , config.vocab_size )
        long_input = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(4_2 )  # Fixed seed at init time so the two models get the same random weights
        original_model = GPTNeoXModel(config )
        original_model.to(torch_device )
        original_model.eval()
        original_short_output = original_model(short_input ).last_hidden_state
        original_long_output = original_model(long_input ).last_hidden_state
        set_seed(4_2 )  # Fixed seed at init time so the two models get the same random weights
        config.rope_scaling = {'''type''': scaling_type, '''factor''': 1_0.0}
        scaled_model = GPTNeoXModel(config )
        scaled_model.to(torch_device )
        scaled_model.eval()
        scaled_short_output = scaled_model(short_input ).last_hidden_state
        scaled_long_output = scaled_model(long_input ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(original_short_output , scaled_short_output , atol=1e-5 ) )
        else:
            self.assertFalse(torch.allclose(original_short_output , scaled_short_output , atol=1e-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(original_long_output , scaled_long_output , atol=1e-5 ) )
@require_torch
class GPTNeoXLanguageGenerationTest( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_lm_generate_gptneox( self ):
        '''simple docstring'''
        tokenizer = AutoTokenizer.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
        for checkpointing in [True, False]:
            model = GPTNeoXForCausalLM.from_pretrained('''EleutherAI/pythia-410m-deduped''' )
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(torch_device )
            inputs = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(torch_device )
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            expected_output = '''My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI\'m not sure'''
            output_ids = model.generate(**inputs , do_sample=False , max_new_tokens=2_0 )
            output_str = tokenizer.batch_decode(output_ids )[0]
            self.assertEqual(output_str , expected_output )
| 502 | 0 |
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class cached_property(property ):
    def __get__( self , obj , objtype=None ):
        """simple docstring"""
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError("unreadable attribute" )
        attr = "__cached_" + self.fget.__name__
        cached = getattr(obj , attr , None )
        if cached is None:
            cached = self.fget(obj )
            setattr(obj , attr , cached )
        return cached
def strtobool(val ) -> int:
    '''simple docstring'''
    val = val.lower()
    if val in {"y", "yes", "t", "true", "on", "1"}:
        return 1
    if val in {"n", "no", "f", "false", "off", "0"}:
        return 0
    raise ValueError(F"""invalid truth value {val!r}""" )
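# Illustrative usage (not part of the original module); strtobool is handy for
# parsing boolean-like strings such as environment variables:
#     strtobool("YES")    # -> 1
#     strtobool("off")    # -> 0
#     strtobool("maybe")  # -> raises ValueError: invalid truth value 'maybe'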
def is_tensor(x ) -> bool:
    '''simple docstring'''
    if is_torch_fx_proxy(x ):
        return True
    if is_torch_available():
        import torch
        if isinstance(x , torch.Tensor ):
            return True
    if is_tf_available():
        import tensorflow as tf
        if isinstance(x , tf.Tensor ):
            return True
    if is_flax_available():
        import jax.numpy as jnp
        from jax.core import Tracer
        if isinstance(x , (jnp.ndarray, Tracer) ):
            return True
    return isinstance(x , np.ndarray )
def _is_numpy(x ):
    '''simple docstring'''
    return isinstance(x , np.ndarray )
def is_numpy_array(x ):
    '''simple docstring'''
    return _is_numpy(x )
def _is_torch(x ):
    '''simple docstring'''
    import torch
    return isinstance(x , torch.Tensor )
def is_torch_tensor(x ):
    '''simple docstring'''
    return False if not is_torch_available() else _is_torch(x )
def _is_torch_device(x ):
    '''simple docstring'''
    import torch
    return isinstance(x , torch.device )
def is_torch_device(x ):
    '''simple docstring'''
    return False if not is_torch_available() else _is_torch_device(x )
def _is_torch_dtype(x ):
    '''simple docstring'''
    import torch
    if isinstance(x , str ):
        if hasattr(torch , x ):
            x = getattr(torch , x )
        else:
            return False
    return isinstance(x , torch.dtype )
def is_torch_dtype(x ):
    '''simple docstring'''
    return False if not is_torch_available() else _is_torch_dtype(x )
def _is_tensorflow(x ):
    '''simple docstring'''
    import tensorflow as tf
    return isinstance(x , tf.Tensor )
def is_tf_tensor(x ):
    '''simple docstring'''
    return False if not is_tf_available() else _is_tensorflow(x )
def _is_tf_symbolic_tensor(x ):
    '''simple docstring'''
    import tensorflow as tf
    # the `is_symbolic_tensor` predicate is only available starting with TF 2.14
    if hasattr(tf , "is_symbolic_tensor" ):
        return tf.is_symbolic_tensor(x )
    return type(x ) == tf.Tensor
def is_tf_symbolic_tensor(x ):
    '''simple docstring'''
    return False if not is_tf_available() else _is_tf_symbolic_tensor(x )
def _is_jax(x ):
    '''simple docstring'''
    import jax.numpy as jnp  # noqa: F811
    return isinstance(x , jnp.ndarray )
def is_jax_tensor(x ):
    '''simple docstring'''
    return False if not is_flax_available() else _is_jax(x )
def to_py_obj(obj ):
    '''simple docstring'''
    if isinstance(obj , (dict, UserDict) ):
        return {k: to_py_obj(v ) for k, v in obj.items()}
    elif isinstance(obj , (list, tuple) ):
        return [to_py_obj(o ) for o in obj]
    elif is_tf_tensor(obj ):
        return obj.numpy().tolist()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().tolist()
    elif is_jax_tensor(obj ):
        return np.asarray(obj ).tolist()
    elif isinstance(obj , (np.ndarray, np.number) ):  # tolist also works on 0d np arrays
        return obj.tolist()
    else:
        return obj
def to_numpy(obj ):
    '''simple docstring'''
    if isinstance(obj , (dict, UserDict) ):
        return {k: to_numpy(v ) for k, v in obj.items()}
    elif isinstance(obj , (list, tuple) ):
        return np.array(obj )
    elif is_tf_tensor(obj ):
        return obj.numpy()
    elif is_torch_tensor(obj ):
        return obj.detach().cpu().numpy()
    elif is_jax_tensor(obj ):
        return np.asarray(obj )
    else:
        return obj
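# Illustrative usage (not part of the original module; assumes torch is installed):
#     import torch
#     to_py_obj({"ids": torch.tensor([1, 2])})        # -> {"ids": [1, 2]}
#     to_numpy(torch.tensor([[1, 2], [3, 4]])).shape  # -> (2, 2)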
class ModelOutput(OrderedDict ):
    def __post_init__( self ):
        """simple docstring"""
        class_fields = fields(self )
        # Safety and consistency checks
        if not len(class_fields ):
            raise ValueError(f"""{self.__class__.__name__} has no fields.""" )
        if not all(field.default is None for field in class_fields[1:] ):
            raise ValueError(f"""{self.__class__.__name__} should not have more than one required field.""" )
        first_field = getattr(self , class_fields[0].name )
        other_fields_are_none = all(getattr(self , field.name ) is None for field in class_fields[1:] )
        if other_fields_are_none and not is_tensor(first_field ):
            if isinstance(first_field , dict ):
                iterator = first_field.items()
                first_field_iterator = True
            else:
                try:
                    iterator = iter(first_field )
                    first_field_iterator = True
                except TypeError:
                    first_field_iterator = False
            # if we provided an iterator as first field and the iterator is a (key, value) iterator
            # set the associated fields
            if first_field_iterator:
                for idx, element in enumerate(iterator ):
                    if (
                        not isinstance(element , (list, tuple) )
                        or not len(element ) == 2
                        or not isinstance(element[0] , str )
                    ):
                        if idx == 0:
                            # If we do not have an iterator of key/values, set it as attribute
                            self[class_fields[0].name] = first_field
                        else:
                            # If we have a mixed iterator, raise an error
                            raise ValueError(
                                f"""Cannot set key/value for {element}. It needs to be a tuple (key, value).""" )
                        break
                    setattr(self , element[0] , element[1] )
                    if element[1] is not None:
                        self[element[0]] = element[1]
            elif first_field is not None:
                self[class_fields[0].name] = first_field
        else:
            for field in class_fields:
                v = getattr(self , field.name )
                if v is not None:
                    self[field.name] = v
    def __delitem__( self , *args , **kwargs ):
        """simple docstring"""
        raise Exception(f"""You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.""" )
    def setdefault( self , *args , **kwargs ):
        """simple docstring"""
        raise Exception(f"""You cannot use ``setdefault`` on a {self.__class__.__name__} instance.""" )
    def pop( self , *args , **kwargs ):
        """simple docstring"""
        raise Exception(f"""You cannot use ``pop`` on a {self.__class__.__name__} instance.""" )
    def update( self , *args , **kwargs ):
        """simple docstring"""
        raise Exception(f"""You cannot use ``update`` on a {self.__class__.__name__} instance.""" )
    def __getitem__( self , k ):
        """simple docstring"""
        if isinstance(k , str ):
            inner_dict = dict(self.items() )
            return inner_dict[k]
        else:
            return self.to_tuple()[k]
    def __setattr__( self , name , value ):
        """simple docstring"""
        if name in self.keys() and value is not None:
            # Don't call self.__setitem__ to avoid recursion errors
            super().__setitem__(name , value )
        super().__setattr__(name , value )
    def __setitem__( self , key , value ):
        """simple docstring"""
        super().__setitem__(key , value )
        # Don't call self.__setattr__ to avoid recursion errors
        super().__setattr__(key , value )
    def to_tuple( self ):
        """simple docstring"""
        return tuple(self[k] for k in self.keys() )
class ExplicitEnum(str , Enum ):
    @classmethod
    def _missing_( cls , value ):
        """simple docstring"""
        raise ValueError(
            f"""{value} is not a valid {cls.__name__}, please select one of {list(cls._value2member_map_.keys() )}""" )
class PaddingStrategy(ExplicitEnum ):
    LONGEST = 'longest'
    MAX_LENGTH = 'max_length'
    DO_NOT_PAD = 'do_not_pad'
class TensorType(ExplicitEnum ):
    PYTORCH = 'pt'
    TENSORFLOW = 'tf'
    NUMPY = 'np'
    JAX = 'jax'
class ContextManagers:
    def __init__( self , context_managers : List[ContextManager] ):
        """simple docstring"""
        self.context_managers = context_managers
        self.stack = ExitStack()
    def __enter__( self ):
        """simple docstring"""
        for context_manager in self.context_managers:
            self.stack.enter_context(context_manager )
    def __exit__( self , *args , **kwargs ):
        """simple docstring"""
        self.stack.__exit__(*args , **kwargs )
def can_return_loss(model_class ):
    '''simple docstring'''
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    for p in signature.parameters:
        if p == "return_loss" and signature.parameters[p].default is True:
            return True
    return False
def find_labels(model_class ):
    '''simple docstring'''
    model_name = model_class.__name__
    framework = infer_framework(model_class )
    if framework == "tf":
        signature = inspect.signature(model_class.call )  # TensorFlow models
    elif framework == "pt":
        signature = inspect.signature(model_class.forward )  # PyTorch models
    else:
        signature = inspect.signature(model_class.__call__ )  # Flax models
    if "QuestionAnswering" in model_name:
        return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
    else:
        return [p for p in signature.parameters if "label" in p]
def flatten_dict(d : MutableMapping , parent_key : str = "" , delimiter : str = "." ):
    '''simple docstring'''
    def _flatten_dict(d , parent_key="" , delimiter="." ):
        for k, v in d.items():
            key = str(parent_key ) + delimiter + str(k ) if parent_key else k
            if v and isinstance(v , MutableMapping ):
                yield from flatten_dict(v , key , delimiter=delimiter ).items()
            else:
                yield key, v
    return dict(_flatten_dict(d , parent_key , delimiter ) )
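# Illustrative usage (not part of the original module):
#     flatten_dict({"a": {"b": 1, "c": {"d": 2}}})
#     # -> {"a.b": 1, "a.c.d": 2}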
@contextmanager
def working_or_temp_dir(working_dir , use_temp_dir : bool = False ):
    '''simple docstring'''
    if use_temp_dir:
        with tempfile.TemporaryDirectory() as tmp_dir:
            yield tmp_dir
    else:
        yield working_dir
def transpose(array , axes=None ):
    '''simple docstring'''
    if is_numpy_array(array ):
        return np.transpose(array , axes=axes )
    elif is_torch_tensor(array ):
        return array.T if axes is None else array.permute(*axes )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.transpose(array , perm=axes )
    elif is_jax_tensor(array ):
        return jnp.transpose(array , axes=axes )
    else:
        raise ValueError(F"""Type not supported for transpose: {type(array )}.""" )
def reshape(array , newshape ):
    '''simple docstring'''
    if is_numpy_array(array ):
        return np.reshape(array , newshape )
    elif is_torch_tensor(array ):
        return array.reshape(*newshape )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.reshape(array , newshape )
    elif is_jax_tensor(array ):
        return jnp.reshape(array , newshape )
    else:
        raise ValueError(F"""Type not supported for reshape: {type(array )}.""" )
def squeeze(array , axis=None ):
    '''simple docstring'''
    if is_numpy_array(array ):
        return np.squeeze(array , axis=axis )
    elif is_torch_tensor(array ):
        return array.squeeze() if axis is None else array.squeeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.squeeze(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.squeeze(array , axis=axis )
    else:
        raise ValueError(F"""Type not supported for squeeze: {type(array )}.""" )
def expand_dims(array , axis ):
    '''simple docstring'''
    if is_numpy_array(array ):
        return np.expand_dims(array , axis )
    elif is_torch_tensor(array ):
        return array.unsqueeze(dim=axis )
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.expand_dims(array , axis=axis )
    elif is_jax_tensor(array ):
        return jnp.expand_dims(array , axis=axis )
    else:
        raise ValueError(F"""Type not supported for expand_dims: {type(array )}.""" )
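# Illustrative usage (not part of the original module): the four helpers above
# dispatch on the input's framework, so the same call works for NumPy, PyTorch,
# TensorFlow and JAX tensors alike:
#     x = np.zeros((2, 3))
#     transpose(x).shape                   # -> (3, 2)
#     reshape(x, (3, 2)).shape             # -> (3, 2)
#     squeeze(expand_dims(x, 0), 0).shape  # -> (2, 3)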
def tensor_size(array ):
    '''simple docstring'''
    if is_numpy_array(array ):
        return np.size(array )
    elif is_torch_tensor(array ):
        return array.numel()
    elif is_tf_tensor(array ):
        import tensorflow as tf
        return tf.size(array )
    elif is_jax_tensor(array ):
        return array.size
    else:
        raise ValueError(F"""Type not supported for tensor_size: {type(array )}.""" )
def add_model_info_to_auto_map(auto_map , repo_id ):
    '''simple docstring'''
    for key, value in auto_map.items():
        if isinstance(value , (tuple, list) ):
            auto_map[key] = [F"""{repo_id}--{v}""" if (v is not None and "--" not in v) else v for v in value]
        elif value is not None and "--" not in value:
            auto_map[key] = F"""{repo_id}--{value}"""
    return auto_map
def infer_framework(model_class ):
    '''simple docstring'''
    for base_class in inspect.getmro(model_class ):
        module = base_class.__module__
        name = base_class.__name__
        if module.startswith("tensorflow" ) or module.startswith("keras" ) or name == "TFPreTrainedModel":
            return "tf"
        elif module.startswith("torch" ) or name == "PreTrainedModel":
            return "pt"
        elif module.startswith("flax" ) or module.startswith("jax" ) or name == "FlaxPreTrainedModel":
            return "flax"
    else:
        raise TypeError(F"""Could not infer framework from class {model_class}.""" )
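# Illustrative usage (not part of the original module; assumes transformers model
# classes are importable):
#     from transformers import BertModel, TFBertModel
#     infer_framework(BertModel)    # -> "pt" (subclasses PreTrainedModel)
#     infer_framework(TFBertModel)  # -> "tf" (subclasses TFPreTrainedModel)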
| 48 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    """simple docstring"""
    def __init__( self , parent , batch_size=13 , image_size=30 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=10 , initializer_range=0.0_2 , num_labels=3 , scope=None , ) -> Any:
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ) -> Optional[Any]:
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ) -> str:
        return ViTConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels ) -> Optional[Any]:
        model = TFViTModel(config=config )
        result = model(pixel_values , training=False )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values , interpolate_pos_encoding=True , training=False )
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ) -> Any:
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config )
        result = model(pixel_values , labels=labels , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values , interpolate_pos_encoding=True , training=False )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config )
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        result = model(pixel_values )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ) -> Dict:
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp( self ) -> List[str]:
        self.model_tester = TFViTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTConfig , has_text_modality=False , hidden_size=37 )
    def test_config( self ) -> int:
        self.config_tester.run_common_tests()
    @unittest.skip(reason='ViT does not use inputs_embeds' )
    def test_inputs_embeds( self ) -> str:
        pass
    @unittest.skip(reason='ViT does not use inputs_embeds' )
    def test_graph_mode_with_inputs_embeds( self ) -> Tuple:
        pass
    def test_model_common_attributes( self ) -> int:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , tf.keras.layers.Layer ) )
    def test_forward_signature( self ) -> Dict:
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.call )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ) -> str:
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ) -> List[Any]:
        model = TFViTModel.from_pretrained('google/vit-base-patch16-224' )
        self.assertIsNotNone(model )
def prepare_img() -> Optional[Any]:
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest( unittest.TestCase ):
    """simple docstring"""
    @cached_property
    def default_image_processor( self ) -> List[Any]:
        return ViTImageProcessor.from_pretrained('google/vit-base-patch16-224' ) if is_vision_available() else None
    @slow
    def test_inference_image_classification_head( self ) -> Dict:
        model = TFViTForImageClassification.from_pretrained('google/vit-base-patch16-224' )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='tf' )
        # forward pass
        outputs = model(**inputs )
        # verify the logits
        expected_shape = tf.TensorShape((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] )
        tf.debugging.assert_near(outputs.logits[0, :3] , expected_slice , atol=1e-4 )
| 383 | 0 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def remove_ignore_keys_(state_dict ):
    '''simple docstring'''
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb(emb ):
    '''simple docstring'''
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def rename_fairseq_keys(state_dict , expert_idx=None ):
    '''simple docstring'''
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("""moe_layer.experts.0""" , F"ffn.experts.expert_{expert_idx}" )
            else:
                key = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" )
        if "gate" in key:
            key = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" )
        if "fc2" in key and "experts" not in key:
            key = key.replace(""".fc2.""" , """.ffn.fc2.""" )
        if "fc1" in key and "experts" not in key:
            key = key.replace(""".fc1.""" , """.ffn.fc1.""" )
        if ".encoder_attn." in key:
            key = key.replace(""".encoder_attn.""" , """.cross_attention.""" )
        if "encoder_attn_layer_norm" in key:
            key = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" )
        if "final_layer_norm" in key:
            key = key.replace("""final_layer_norm""" , """ff_layer_norm""" )
        new_dict[key] = state_dict[old_key]
    return new_dict
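# Illustrative example (not part of the original script) of the key mapping above,
# e.g. with expert_idx=3:
#     "decoder.layers.1.moe_layer.experts.0.fc1.weight"
#     -> "decoder.layers.1.ffn.experts.expert_3.fc1.weight"
# and for a shared (non-expert) feed-forward weight:
#     "encoder.layers.0.fc1.weight" -> "encoder.layers.0.ffn.fc1.weight"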
def shard_on_the_fly(switch_checkpoint_path , dump_path , num_experts , dtype , weights_name : str = WEIGHTS_NAME ):
    '''simple docstring'''
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path , exist_ok=True )
    for expert in range(num_experts ):
        expert_path = switch_checkpoint_path + F"-rank-{expert}.pt"
        if os.path.isfile(expert_path ):
            expert_state = torch.load(expert_path )["model"]
            remove_ignore_keys_(expert_state )
            expert_state = rename_fairseq_keys(expert_state , expert )
            save_path = os.path.join(
                dump_path , weights_name.replace(""".bin""" , F"-{len(sharded_state_dicts )+1:05d}-of-???.bin" ) )
            torch.save(expert_state , save_path )
            sharded_state_dicts.append(expert_state.keys() )
            total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
                expert_state[list(expert_state )[0]].dtype )
    # Add the last block
    save_path = os.path.join(dump_path , weights_name.replace(""".bin""" , F"-{len(sharded_state_dicts )+1:05d}-of-???.bin" ) )
    shared_weights = torch.load(switch_checkpoint_path + """-shared.pt""" )["model"]
    remove_ignore_keys_(shared_weights )
    shared_weights = rename_fairseq_keys(shared_weights , None )
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys() )
    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts ) == 1:
        save_path = os.path.join(dump_path , weights_name )
        torch.save(shared_weights , save_path )
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights , save_path )
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts ):
        shard_file = weights_name.replace(""".bin""" , F"-{idx+1:05d}-of-{len(sharded_state_dicts ):05d}.bin" )
        temp_filename = os.path.join(dump_path , weights_name.replace(""".bin""" , F"-{idx+1:05d}-of-???.bin" ) )
        os.rename(temp_filename , os.path.join(dump_path , shard_file ) )
        for key in shard:
            weight_map[key] = shard_file
    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}
    with open(os.path.join(dump_path , WEIGHTS_INDEX_NAME ) , """w""" , encoding="""utf-8""" ) as f:
        content = json.dumps(index , indent=2 , sort_keys=True ) + "\n"
        f.write(content )
    return metadata, index
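# Illustrative sketch (not part of the original script) of the index produced above,
# which follows the standard sharded-checkpoint format; the key and file names are
# hypothetical:
#     {
#       "metadata": {"total_size": ...},
#       "weight_map": {
#         "decoder.layers.0.ffn.experts.expert_0.fc1.weight": "pytorch_model-00001-of-00129.bin",
#         ...
#       }
#     }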
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_2_8,
args.dtype,
)
    config = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8
)
config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 713 |
import bz2
import gzip
import lzma
import os
import shutil
import struct
import tarfile
import warnings
import zipfile
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Dict, List, Optional, Type, Union
from .. import config
from .filelock import FileLock
from .logging import get_logger
logger = get_logger(__name__)
class ExtractManager:
    def __init__( self , cache_dir : Optional[str] = None ):
        '''simple docstring'''
        self.extract_dir = (
            os.path.join(cache_dir , config.EXTRACTED_DATASETS_DIR ) if cache_dir else config.EXTRACTED_DATASETS_PATH
        )
        self.extractor = Extractor
    def _get_output_path( self , path : str ) -> str:
        '''simple docstring'''
        from .file_utils import hash_url_to_filename
        # Path where we extract compressed archives
        # We extract in the cache dir, and get the extracted path name by hashing the original path
        abs_path = os.path.abspath(path )
        return os.path.join(self.extract_dir , hash_url_to_filename(abs_path ) )
    def _do_extract( self , output_path : str , force_extract : bool ) -> bool:
        '''simple docstring'''
        return force_extract or (
            not os.path.isfile(output_path ) and not (os.path.isdir(output_path ) and os.listdir(output_path ))
        )
    def extract( self , input_path : str , force_extract : bool = False ) -> str:
        '''simple docstring'''
        extractor_format = self.extractor.infer_extractor_format(input_path )
        if not extractor_format:
            return input_path
        output_path = self._get_output_path(input_path )
        if self._do_extract(output_path , force_extract ):
            self.extractor.extract(input_path , output_path , extractor_format )
        return output_path
class BaseExtractor(ABC ):
    @classmethod
    @abstractmethod
    def is_extractable( cls , path : Union[Path, str] , **kwargs ) -> bool:
        '''simple docstring'''
        ...
    @staticmethod
    @abstractmethod
    def extract(input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
        '''simple docstring'''
        ...
class MagicNumberBaseExtractor(BaseExtractor , ABC ):
    magic_numbers = []
    @staticmethod
    def read_magic_number(path : Union[Path, str] , magic_number_length : int ):
        '''simple docstring'''
        with open(path , """rb""" ) as f:
            return f.read(magic_number_length )
    @classmethod
    def is_extractable( cls , path : Union[Path, str] , magic_number : bytes = b"" ) -> bool:
        '''simple docstring'''
        if not magic_number:
            magic_number_length = max(len(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
            try:
                magic_number = cls.read_magic_number(path , magic_number_length )
            except OSError:
                return False
        return any(magic_number.startswith(cls_magic_number ) for cls_magic_number in cls.magic_numbers )
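# Illustrative usage (not part of the original module): concrete extractors below
# declare their `magic_numbers` and inherit this sniffing logic, e.g. gzip streams
# always begin with b"\x1f\x8b":
#     GzipExtractor.is_extractable("dump.json.gz")  # True for a real gzip file
#     GzipExtractor.is_extractable("dump.json")     # False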
class TarExtractor(BaseExtractor ):
    @classmethod
    def is_extractable( cls , path : Union[Path, str] , **kwargs ) -> bool:
        '''simple docstring'''
        return tarfile.is_tarfile(path )
    @staticmethod
    def safemembers(members , output_path ):
        '''simple docstring'''
        def resolved(path : str ) -> str:
            return os.path.realpath(os.path.abspath(path ) )
        def badpath(path : str , base : str ) -> bool:
            # joinpath will ignore base if path is absolute
            return not resolved(os.path.join(base , path ) ).startswith(base )
        def badlink(info , base : str ) -> bool:
            # Links are interpreted relative to the directory containing the link
            tip = resolved(os.path.join(base , os.path.dirname(info.name ) ) )
            return badpath(info.linkname , base=tip )
        base = resolved(output_path )
        for finfo in members:
            if badpath(finfo.name , base ):
                logger.error(f"Extraction of {finfo.name} is blocked (illegal path)" )
            elif finfo.issym() and badlink(finfo , base ):
                logger.error(f"Extraction of {finfo.name} is blocked: Symlink to {finfo.linkname}" )
            elif finfo.islnk() and badlink(finfo , base ):
                logger.error(f"Extraction of {finfo.name} is blocked: Hard link to {finfo.linkname}" )
            else:
                yield finfo
    @staticmethod
    def extract(input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
        '''simple docstring'''
        os.makedirs(output_path , exist_ok=True )
        tar_file = tarfile.open(input_path )
        tar_file.extractall(output_path , members=TarExtractor.safemembers(tar_file , output_path ) )
        tar_file.close()
class GzipExtractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"\x1F\x8B"]
    @staticmethod
    def extract(input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
        '''simple docstring'''
        with gzip.open(input_path , """rb""" ) as gzip_file:
            with open(output_path , """wb""" ) as extracted_file:
                shutil.copyfileobj(gzip_file , extracted_file )
class ZipExtractor(MagicNumberBaseExtractor ):
    magic_numbers = [
        b"PK\x03\x04",
        b"PK\x05\x06",  # empty archive
        b"PK\x07\x08",  # spanned archive
    ]
    @classmethod
    def is_extractable( cls , path : Union[Path, str] , magic_number : bytes = b"" ) -> bool:
        '''simple docstring'''
        if super().is_extractable(path , magic_number=magic_number ):
            return True
        try:
            # Alternative version of zipfile.is_zipfile that has less false positives, but misses executable zip archives.
            # From: https://github.com/python/cpython/pull/5053
            from zipfile import (
                _CD_SIGNATURE,
                _ECD_DISK_NUMBER,
                _ECD_DISK_START,
                _ECD_ENTRIES_TOTAL,
                _ECD_OFFSET,
                _ECD_SIZE,
                _EndRecData,
                sizeCentralDir,
                stringCentralDir,
                structCentralDir,
            )
            with open(path , """rb""" ) as fp:
                endrec = _EndRecData(fp )
                if endrec:
                    if endrec[_ECD_ENTRIES_TOTAL] == 0 and endrec[_ECD_SIZE] == 0 and endrec[_ECD_OFFSET] == 0:
                        return True  # Empty zipfiles are still zipfiles
                    elif endrec[_ECD_DISK_NUMBER] == endrec[_ECD_DISK_START]:
                        fp.seek(endrec[_ECD_OFFSET] )  # Central directory is on the same disk
                        if fp.tell() == endrec[_ECD_OFFSET] and endrec[_ECD_SIZE] >= sizeCentralDir:
                            data = fp.read(sizeCentralDir )  # CD is where we expect it to be
                            if len(data ) == sizeCentralDir:
                                centdir = struct.unpack(structCentralDir , data )  # CD is the right size
                                if centdir[_CD_SIGNATURE] == stringCentralDir:
                                    return True  # First central directory entry has correct magic number
            return False
        except Exception:  # catch all errors in case future python versions change the zipfile internals
            return False
    @staticmethod
    def extract(input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
        '''simple docstring'''
        os.makedirs(output_path , exist_ok=True )
        with zipfile.ZipFile(input_path , """r""" ) as zip_file:
            zip_file.extractall(output_path )
            zip_file.close()
class XzExtractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"\xFD\x37\x7A\x58\x5A\x00"]
    @staticmethod
    def extract(input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
        '''simple docstring'''
        with lzma.open(input_path ) as compressed_file:
            with open(output_path , """wb""" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class RarExtractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"Rar!\x1a\x07\x00", b"Rar!\x1a\x07\x01\x00"]  # RAR_ID # RAR5_ID
    @staticmethod
    def extract(input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
        '''simple docstring'''
        if not config.RARFILE_AVAILABLE:
            raise ImportError("""Please pip install rarfile""" )
        import rarfile
        os.makedirs(output_path , exist_ok=True )
        rf = rarfile.RarFile(input_path )
        rf.extractall(output_path )
        rf.close()
class ZstdExtractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"\x28\xb5\x2F\xFD"]
    @staticmethod
    def extract(input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
        '''simple docstring'''
        if not config.ZSTANDARD_AVAILABLE:
            raise ImportError("""Please pip install zstandard""" )
        import zstandard as zstd
        dctx = zstd.ZstdDecompressor()
        with open(input_path , """rb""" ) as ifh, open(output_path , """wb""" ) as ofh:
            dctx.copy_stream(ifh , ofh )
class Bzip2Extractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"\x42\x5A\x68"]
    @staticmethod
    def extract(input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
        '''simple docstring'''
        with bz2.open(input_path , """rb""" ) as compressed_file:
            with open(output_path , """wb""" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class SevenZipExtractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"\x37\x7A\xBC\xAF\x27\x1C"]
    @staticmethod
    def extract(input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
        '''simple docstring'''
        if not config.PY7ZR_AVAILABLE:
            raise ImportError("""Please pip install py7zr""" )
        import py7zr
        os.makedirs(output_path , exist_ok=True )
        with py7zr.SevenZipFile(input_path , """r""" ) as archive:
            archive.extractall(output_path )
class Lz4Extractor(MagicNumberBaseExtractor ):
    magic_numbers = [b"\x04\x22\x4D\x18"]
    @staticmethod
    def extract(input_path : Union[Path, str] , output_path : Union[Path, str] ) -> None:
        '''simple docstring'''
        if not config.LZ4_AVAILABLE:
            raise ImportError("""Please pip install lz4""" )
        import lz4.frame
        with lz4.frame.open(input_path , """rb""" ) as compressed_file:
            with open(output_path , """wb""" ) as extracted_file:
                shutil.copyfileobj(compressed_file , extracted_file )
class Extractor:
    # Put zip file to the last, b/c it is possible wrongly detected as zip (I guess it means: as tar or gzip)
    extractors: Dict[str, Type[BaseExtractor]] = {
        "tar": TarExtractor,
        "gzip": GzipExtractor,
        "zip": ZipExtractor,
        "xz": XzExtractor,
        "rar": RarExtractor,
        "zstd": ZstdExtractor,
        "bz2": Bzip2Extractor,
        "7z": SevenZipExtractor,  # <Added version="2.4.0"/>
        "lz4": Lz4Extractor,  # <Added version="2.4.0"/>
    }
    @classmethod
    def _get_magic_number_max_length( cls ):
        '''simple docstring'''
        return max(
            len(extractor_magic_number )
            for extractor in cls.extractors.values()
            if issubclass(extractor , MagicNumberBaseExtractor )
            for extractor_magic_number in extractor.magic_numbers )
    @staticmethod
    def _read_magic_number(path : Union[Path, str] , magic_number_length : int ):
        '''simple docstring'''
        try:
            return MagicNumberBaseExtractor.read_magic_number(path , magic_number_length=magic_number_length )
        except OSError:
            return b""
    @classmethod
    def is_extractable( cls , path : Union[Path, str] , return_extractor : bool = False ) -> bool:
        '''simple docstring'''
        warnings.warn(
            """Method 'is_extractable' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
            """Use 'infer_extractor_format' instead.""" , category=FutureWarning , )
        extractor_format = cls.infer_extractor_format(path )
        if extractor_format:
            return True if not return_extractor else (True, cls.extractors[extractor_format])
        return False if not return_extractor else (False, None)
    @classmethod
    def infer_extractor_format( cls , path : Union[Path, str] ) -> str:  # <Added version="2.4.0"/>
        '''simple docstring'''
        magic_number_max_length = cls._get_magic_number_max_length()
        magic_number = cls._read_magic_number(path , magic_number_max_length )
        for extractor_format, extractor in cls.extractors.items():
            if extractor.is_extractable(path , magic_number=magic_number ):
                return extractor_format
    @classmethod
    def extract( cls , input_path : Union[Path, str] , output_path : Union[Path, str] , extractor_format : Optional[str] = None , extractor : Optional[BaseExtractor] = "deprecated" , ) -> None:
        '''simple docstring'''
        os.makedirs(os.path.dirname(output_path ) , exist_ok=True )
        # Prevent parallel extractions
        lock_path = str(Path(output_path ).with_suffix(""".lock""" ) )
        with FileLock(lock_path ):
            shutil.rmtree(output_path , ignore_errors=True )
            if extractor_format or extractor != "deprecated":
                if extractor != "deprecated" or not isinstance(extractor_format , str ):  # passed as positional arg
                    warnings.warn(
                        """Parameter 'extractor' was deprecated in version 2.4.0 and will be removed in 3.0.0. """
                        """Use 'extractor_format' instead.""" , category=FutureWarning , )
                    extractor_format = extractor if extractor != """deprecated""" else extractor_format
                else:
                    extractor = cls.extractors[extractor_format]
                return extractor.extract(input_path , output_path )
            else:
                warnings.warn(
                    """Parameter 'extractor_format' was made required in version 2.4.0 and not passing it will raise an """
                    """exception in 3.0.0.""" , category=FutureWarning , )
                for extractor in cls.extractors.values():
                    if extractor.is_extractable(input_path ):
                        return extractor.extract(input_path , output_path )
| 196 | 0 |
"""simple docstring"""
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader ):
    def __init__( self , path_or_paths : NestedDataStructureLike[PathLike] , split : Optional[NamedSplit] = None , features : Optional[Features] = None , cache_dir : Optional[str] = None , keep_in_memory : bool = False , streaming : bool = False , field : Optional[str] = None , num_proc : Optional[int] = None , **kwargs , ):
        """simple docstring"""
        super().__init__(
            path_or_paths , split=split , features=features , cache_dir=cache_dir , keep_in_memory=keep_in_memory , streaming=streaming , num_proc=num_proc , **kwargs , )
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths , dict ) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir , data_files=path_or_paths , features=features , field=field , **kwargs , )
    def read( self ):
        """simple docstring"""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split )
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config , download_mode=download_mode , verification_mode=verification_mode , base_path=base_path , num_proc=self.num_proc , )
            dataset = self.builder.as_dataset(
                split=self.split , verification_mode=verification_mode , in_memory=self.keep_in_memory )
        return dataset
class JsonDatasetWriter:
    def __init__( self , dataset : Dataset , path_or_buf : Union[PathLike, BinaryIO] , batch_size : Optional[int] = None , num_proc : Optional[int] = None , **to_json_kwargs , ):
        """simple docstring"""
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f'''num_proc {num_proc} must be an integer > 0.''' )
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = '''utf-8'''
        self.to_json_kwargs = to_json_kwargs
    def write( self ):
        """simple docstring"""
        _ = self.to_json_kwargs.pop('''path_or_buf''' , None )
        orient = self.to_json_kwargs.pop('''orient''' , '''records''' )
        lines = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False )
        index = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True )
        compression = self.to_json_kwargs.pop('''compression''' , None )
        if compression not in [None, "infer", "gzip", "bz2", "xz"]:
            raise NotImplementedError(f'''`datasets` currently does not support {compression} compression''' )
        if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
            with fsspec.open(self.path_or_buf , '''wb''' , compression=compression ) as buffer:
                written = self._write(file_obj=buffer , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
        else:
            if compression:
                raise NotImplementedError(
                    f'''The compression parameter is not supported when writing to a buffer, but compression={compression}'''
                    ''' was passed. Please provide a local path instead.''' )
            written = self._write(
                file_obj=self.path_or_buf , orient=orient , lines=lines , index=index , **self.to_json_kwargs )
        return written
    def _batch_json( self , args ):
        """simple docstring"""
        offset , orient , lines , index , to_json_kwargs = args
        batch = query_table(
            table=self.dataset.data , key=slice(offset , offset + self.batch_size ) , indices=self.dataset._indices , )
        json_str = batch.to_pandas().to_json(
            path_or_buf=None , orient=orient , lines=lines , index=index , **to_json_kwargs )
        if not json_str.endswith('''\n''' ):
            json_str += "\n"
        return json_str.encode(self.encoding )
    def _write( self , file_obj : BinaryIO , orient , lines , index , **to_json_kwargs , ) -> int:
        """simple docstring"""
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
                json_bytes = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
                written += file_obj.write(json_bytes )
        else:
            num_rows , batch_size = len(self.dataset ), self.batch_size
            with multiprocessing.Pool(self.num_proc ) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , num_rows , batch_size )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
                    written += file_obj.write(json_str )
        return written
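# Illustrative usage (not part of the original module) through the public
# `datasets` API, which delegates to JsonDatasetWriter:
#     from datasets import Dataset
#     Dataset.from_dict({"a": [1, 2]}).to_json("out.jsonl")  # JSON Lines by default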
| 83 |
"""simple docstring"""
import os
import time
import numpy as np
import onnxruntime as ort
lowerCAmelCase__ = '''1'''
lowerCAmelCase__ = '''0'''
lowerCAmelCase__ = '''1'''
sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print('''Create inference session...''')
execution_provider = ['''TensorrtExecutionProvider''', '''CUDAExecutionProvider''']
sess = ort.InferenceSession('''model.onnx''', sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()
sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)
print('''Warm up phase...''')
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)
print('''Start inference...''')
start_time = time.time()
max_iters = 2000
outputs = {}
for iter in range(max_iters):
    outputs = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )
print('''Average Inference Time = {:.3f} ms'''.format((time.time() - start_time) * 1000 / max_iters))
| 83 | 1 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest ):
    '''simple docstring'''
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
    def get_scheduler_config( self , **kwargs ):
        '''simple docstring'''
        config = {
            'num_train_timesteps': 2_01,
            'sigma_min': 0.0_02,
            'sigma_max': 80.0,
        }
        config.update(**kwargs )
        return config
    def test_step_shape( self ):
        '''simple docstring'''
        num_inference_steps = 10
        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config )
        scheduler.set_timesteps(num_inference_steps )
        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]
        sample = self.dummy_sample
        residual = 0.1 * sample
        output_0 = scheduler.step(residual , timestep_0 , sample ).prev_sample
        output_1 = scheduler.step(residual , timestep_1 , sample ).prev_sample
        self.assertEqual(output_0.shape , sample.shape )
        self.assertEqual(output_0.shape , output_1.shape )
    def test_timesteps( self ):
        '''simple docstring'''
        for timesteps in [10, 50, 1_00, 10_00]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def test_clip_denoised( self ):
        '''simple docstring'''
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised )
    def test_full_loop_no_noise_onestep( self ):
        '''simple docstring'''
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps )
        timesteps = scheduler.timesteps
        generator = torch.manual_seed(0 )
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        for i, t in enumerate(timesteps ):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample , t )
            # 2. predict noise residual
            model_output = model(scaled_sample , t )
            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(model_output , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_sum.item() - 1_92.76_14 ) < 1e-2
        assert abs(result_mean.item() - 0.25_10 ) < 1e-3
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : str = self.scheduler_classes[0]
_snake_case : str = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**UpperCamelCase )
_snake_case : Optional[int] = [1_06, 0]
scheduler.set_timesteps(timesteps=UpperCamelCase )
_snake_case : Optional[Any] = scheduler.timesteps
_snake_case : Any = torch.manual_seed(0 )
_snake_case : Union[str, Any] = self.dummy_model()
_snake_case : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
_snake_case : int = scheduler.scale_model_input(UpperCamelCase , UpperCamelCase )
# 2. predict noise residual
_snake_case : Union[str, Any] = model(UpperCamelCase , UpperCamelCase )
# 3. predict previous sample x_t-1
_snake_case : Optional[int] = scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , generator=UpperCamelCase ).prev_sample
_snake_case : Union[str, Any] = pred_prev_sample
_snake_case : Union[str, Any] = torch.sum(torch.abs(UpperCamelCase ) )
_snake_case : Tuple = torch.mean(torch.abs(UpperCamelCase ) )
assert abs(result_sum.item() - 3_47.63_57 ) < 1e-2
assert abs(result_mean.item() - 0.45_27 ) < 1e-3
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_snake_case : Any = self.scheduler_classes[0]
_snake_case : Optional[Any] = self.get_scheduler_config()
_snake_case : Optional[int] = scheduler_class(**UpperCamelCase )
_snake_case : Optional[Any] = [39, 30, 12, 15, 0]
with self.assertRaises(UpperCamelCase , msg='`timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=UpperCamelCase )
def UpperCamelCase_ ( self : Tuple ):
'''simple docstring'''
_snake_case : Optional[int] = self.scheduler_classes[0]
_snake_case : List[Any] = self.get_scheduler_config()
_snake_case : int = scheduler_class(**UpperCamelCase )
_snake_case : Dict = [39, 30, 12, 1, 0]
_snake_case : Union[str, Any] = len(UpperCamelCase )
with self.assertRaises(UpperCamelCase , msg='Can only pass one of `num_inference_steps` or `timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=UpperCamelCase , timesteps=UpperCamelCase )
def UpperCamelCase_ ( self : Optional[Any] ):
'''simple docstring'''
_snake_case : Dict = self.scheduler_classes[0]
_snake_case : str = self.get_scheduler_config()
_snake_case : int = scheduler_class(**UpperCamelCase )
_snake_case : Dict = [scheduler.config.num_train_timesteps]
with self.assertRaises(
UpperCamelCase , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
scheduler.set_timesteps(timesteps=UpperCamelCase )
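# Hedged sketch of the timestep validation the last three tests exercise:
# custom timesteps must be strictly descending, must not be combined with
# num_inference_steps, and must start below num_train_timesteps.
# `validate_timesteps` is an illustrative helper, not a diffusers API.
def validate_timesteps(timesteps, num_train_timesteps, num_inference_steps=None):
    if num_inference_steps is not None:
        raise ValueError("Can only pass one of `num_inference_steps` or `timesteps`.")
    if any(t2 >= t1 for t1, t2 in zip(timesteps, timesteps[1:])):
        raise ValueError("`timesteps` must be in descending order.")
    if timesteps[0] >= num_train_timesteps:
        raise ValueError(
            f"`timesteps` must start before `self.config.train_timesteps`: {num_train_timesteps}"
        )


validate_timesteps([39, 30, 12, 1, 0], num_train_timesteps=201)  # passes silently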
| 708 |
import copy
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Optional, Union
@dataclass
class _lowerCAmelCase :
'''simple docstring'''
a_ : Optional[Union[str, Path]] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
a_ : bool =False
a_ : bool =False
a_ : bool =False
a_ : bool =True
a_ : Optional[int] =None
a_ : int =1
a_ : Optional[Union[str, bool]] =None
a_ : bool =False
a_ : Optional[Dict] =None
a_ : Optional[str] =None
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})
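# Usage sketch for the per-attribute deepcopy above (the `_Demo` dataclass is
# hypothetical; the config's real field names were lost to renaming). A shallow
# copy would let both instances share mutable field values, which is exactly
# what the per-attribute deepcopy prevents.
@dataclass
class _Demo:
    tags: Optional[Dict] = None

    def copy(self):
        return self.__class__(**{k: copy.deepcopy(v) for k, v in self.__dict__.items()})


_a = _Demo(tags={"x": 1})
_b = _a.copy()
_b.tags["x"] = 2
assert _a.tags["x"] == 1  # the original is unaffected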
| 669 | 0 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph):
    """Return True if the undirected graph (adjacency list) is 2-colorable."""
    visited = [False] * len(graph)
    color = [-1] * len(graph)

    def dfs(v, c):
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)

    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)

    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False

    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph)) | 296 |
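# For contrast, an iterative BFS two-coloring that reaches the same verdict
# and avoids recursion-depth limits on long paths.
from collections import deque


def check_bipartite_bfs(bfs_graph):
    bfs_color = {}
    for start in bfs_graph:
        if start in bfs_color:
            continue
        bfs_color[start] = 0
        queue = deque([start])
        while queue:
            v = queue.popleft()
            for u in bfs_graph[v]:
                if u not in bfs_color:
                    bfs_color[u] = 1 - bfs_color[v]
                    queue.append(u)
                elif bfs_color[u] == bfs_color[v]:
                    return False
    return True


assert check_bipartite_bfs(graph) == check_bipartite_dfs(graph)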
'''simple docstring'''
from __future__ import annotations
def rec_insertion_sort(collection: list, n: int):
    """Recursively sort the first `n` elements of `collection` in place."""
    if len(collection) <= 1 or n <= 1:
        return

    insert_next(collection, n - 1)
    rec_insertion_sort(collection, n - 1)


def insert_next(collection: list, index: int):
    """Swap `collection[index - 1]` rightwards while it is out of order."""
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return

    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1], collection[index] = (
        collection[index],
        collection[index - 1],
    )

    insert_next(collection, index + 1)


if __name__ == "__main__":
    numbers = input("Enter integers separated by spaces: ")
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
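# Quick sanity check; note the recursion depth grows linearly with the list
# length, so Python's default recursion limit (~1000) caps the usable size.
_sample = [5, 2, 9, 1, 5, 6]
rec_insertion_sort(_sample, len(_sample))
assert _sample == [1, 2, 5, 5, 6, 9]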
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A : str = logging.get_logger(__name__)
__A : Union[str, Any] = {
"bigcode/gpt_bigcode-santacoder": "https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json",
}
class GPTBigCodeConfig(PretrainedConfig):
    # NOTE: the class and attribute names here were reconstructed; the original
    # identifiers were renamed away.
    model_type = "gpt_bigcode"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "n_embd",
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }
    def __init__(self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function="gelu_pytorch_tanh", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, attention_softmax_in_fp32=True, scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        # "fpaa" in the source is a mangled "fp32"; restored accordingly.
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
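# Usage sketch for the config above (class and parameter names were
# reconstructed from the assignments, so treat them as informed guesses):
# the attribute_map exposes the GPT-2-style names under the generic ones.
cfg = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128)
assert cfg.hidden_size == 128  # attribute_map aliases n_embd -> hidden_size
assert cfg.num_hidden_layers == 2  # ...and n_layer -> num_hidden_layers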
| 595 | """simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_rembert": ["REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RemBertConfig", "RemBertOnnxConfig"]
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert"] = ["RemBertTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_rembert_fast"] = ["RemBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_rembert"] = [
"REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"RemBertForCausalLM",
"RemBertForMaskedLM",
"RemBertForMultipleChoice",
"RemBertForQuestionAnswering",
"RemBertForSequenceClassification",
"RemBertForTokenClassification",
"RemBertLayer",
"RemBertModel",
"RemBertPreTrainedModel",
"load_tf_weights_in_rembert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_rembert"] = [
"TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRemBertForCausalLM",
"TFRemBertForMaskedLM",
"TFRemBertForMultipleChoice",
"TFRemBertForQuestionAnswering",
"TFRemBertForSequenceClassification",
"TFRemBertForTokenClassification",
"TFRemBertLayer",
"TFRemBertModel",
"TFRemBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert import RemBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_rembert_fast import RemBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rembert import (
REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RemBertForCausalLM,
RemBertForMaskedLM,
RemBertForMultipleChoice,
RemBertForQuestionAnswering,
RemBertForSequenceClassification,
RemBertForTokenClassification,
RemBertLayer,
RemBertModel,
RemBertPreTrainedModel,
load_tf_weights_in_rembert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rembert import (
TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRemBertForCausalLM,
TFRemBertForMaskedLM,
TFRemBertForMultipleChoice,
TFRemBertForQuestionAnswering,
TFRemBertForSequenceClassification,
TFRemBertForTokenClassification,
TFRemBertLayer,
TFRemBertModel,
TFRemBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
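# A stripped-down illustration of the lazy-module idea used above: attribute
# access triggers the real submodule import. This is a sketch, not the
# transformers `_LazyModule` implementation.
import importlib
import types


class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._name_to_submodule = {
            attr: submodule for submodule, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        submodule = self._name_to_submodule.get(attr)
        if submodule is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        module = importlib.import_module(f".{submodule}", self.__name__)
        return getattr(module, attr)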
| 595 | 1 |
"""simple docstring"""
import gc
import unittest
from diffusers import FlaxStableDiffusionInpaintPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self : int ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def snake_case ( self : Optional[Any] ):
        __lowercase : Tuple = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png" )
        __lowercase : int = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
__lowercase : List[str] = 'xvjiarui/stable-diffusion-2-inpainting'
__lowercase : Tuple = FlaxStableDiffusionInpaintPipeline.from_pretrained(_UpperCAmelCase , safety_checker=_UpperCAmelCase )
__lowercase : List[str] = 'Face of a yellow cat, high resolution, sitting on a park bench'
__lowercase : Any = jax.random.PRNGKey(0 )
__lowercase : Any = 5_0
__lowercase : Optional[int] = jax.device_count()
__lowercase : List[Any] = num_samples * [prompt]
__lowercase : Dict = num_samples * [init_image]
__lowercase : Dict = num_samples * [mask_image]
__lowercase : List[Any] = pipeline.prepare_inputs(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# shard inputs and rng
__lowercase : Any = replicate(_UpperCAmelCase )
__lowercase : Tuple = jax.random.split(_UpperCAmelCase , jax.device_count() )
__lowercase : Union[str, Any] = shard(_UpperCAmelCase )
__lowercase : List[Any] = shard(_UpperCAmelCase )
__lowercase : List[str] = shard(_UpperCAmelCase )
__lowercase : Optional[int] = pipeline(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , jit=_UpperCAmelCase )
__lowercase : Dict = output.images.reshape(_UpperCAmelCase , 5_1_2 , 5_1_2 , 3 )
__lowercase : Optional[Any] = images[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1]
__lowercase : str = jnp.asarray(jax.device_get(image_slice.flatten() ) )
__lowercase : Optional[int] = jnp.array(
[0.3_6_1_1_3_0_7, 0.3_7_6_4_9_7_3_6, 0.3_7_5_7_4_0_8, 0.3_8_2_1_3_9_5_3, 0.3_9_2_9_5_1_6_7, 0.3_8_4_1_6_3_1, 0.4_1_5_5_4_9_7_8, 0.4_1_3_7_4_7_5, 0.4_2_1_7_0_8_4] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
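# The replicate/shard/jit pattern above is the standard pmap data layout:
# params are copied to every device while inputs gain a leading device axis.
# A shape-only illustration (a sketch, guarded like the imports above):
if is_flax_available():
    _n = jax.device_count()
    _x = jnp.ones((_n * 2, 77), dtype=jnp.int32)
    assert shard(_x).shape == (_n, 2, 77)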
| 575 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:  # NOTE: class names in this file reconstructed from the CodeParrot example scripts
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be trained.'} )
lowerCamelCase : Optional[str] = field(
default='./' , metadata={'help': 'Save dir where model repo is cloned and models updates are saved to.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path of training dataset.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for training.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size for evaluation.'} )
lowerCamelCase : Optional[float] = field(default=0.1 , metadata={'help': 'Value of weight decay.'} )
lowerCamelCase : Optional[int] = field(
default=1_00_00 , metadata={'help': 'Size of buffer used to shuffle streaming dataset.'} )
    lowerCamelCase : Optional[float] = field(default=2e-4 , metadata={'help': 'Learning rate for training.'} )
    lowerCamelCase : Optional[str] = field(default='cosine' , metadata={'help': 'Learning rate scheduler type.'} )
lowerCamelCase : Optional[int] = field(
default=7_50 , metadata={'help': 'Number of warmup steps in the learning rate schedule.'} )
lowerCamelCase : Optional[int] = field(
default=16 , metadata={'help': 'Number of gradient accumulation steps.'} )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'Use gradient checkpointing to reduce memory footprint.'} )
lowerCamelCase : Optional[int] = field(default=5_00_00 , metadata={'help': 'Maximum number of training steps.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Sequence lengths used for training.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Training seed.'} )
lowerCamelCase : Optional[int] = field(
default=10_24 , metadata={'help': 'Interval to save checkpoints. Measured as number of forward passes not training steps.'} , )
lowerCamelCase : Optional[str] = field(
default=snake_case__ , metadata={'help': 'States path if the training should continue from a checkpoint folder.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'If True the data is pretokenized.'} )
@dataclass
class EvaluationArguments:
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-valid' , metadata={'help': 'Name or path of validation dataset.'} )
lowerCamelCase : Optional[int] = field(default=2 , metadata={'help': 'Batch size used for evaluation.'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={'help': 'Maximum number of evaluation steps. If -1 the full dataset is evaluated.'} )
lowerCamelCase : Optional[int] = field(default=10_24 , metadata={'help': 'Length of sequences to be evaluated.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
@dataclass
class HumanEvalArguments:
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Model name or path of model to be evaluated.'} )
lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for code evaluation.'} )
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={'help': 'The number of human-eval tasks to run. If not included all tasks are evaluated.'} , )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'Sample from the language model\'s output distribution.'} )
lowerCamelCase : Optional[float] = field(default=0.2 , metadata={'help': 'Sampling temperature used for generation.'} )
lowerCamelCase : Optional[int] = field(default=2_56 , metadata={'help': 'Maximum number of newly generated tokens.'} )
lowerCamelCase : Optional[int] = field(default=0 , metadata={'help': 'Top-k parameter used for generation.'} )
lowerCamelCase : Optional[float] = field(default=0.95 , metadata={'help': 'Top-p parameter used for nucleus sampling.'} )
lowerCamelCase : Optional[int] = field(default=10 , metadata={'help': 'Number of generations to run in parallel.'} )
lowerCamelCase : Optional[int] = field(
default=2_00 , metadata={'help': 'Number of completions to generate for each sample.'} )
lowerCamelCase : Optional[int] = field(default=1 , metadata={'help': 'Random seed used for evaluation.'} )
    lowerCamelCase : Optional[str] = field(
        default='eval_results.json' , metadata={'help': 'Name of the file in which the evaluation results are saved.'} )
lowerCamelCase : Optional[str] = field(
default='0' , metadata={'help': 'Allow `code_eval` to execute Python code on machine'} )
lowerCamelCase : Optional[int] = field(
default=-1 , metadata={
'help': (
'Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive'
' number corresponds to which GPU device id to run on.'
)
} , )
@dataclass
class PreprocessingArguments:
"""simple docstring"""
lowerCamelCase : Optional[int] = field(
default=snake_case__ , metadata={
'help': 'The number of CPU cores to use for parallel preprocessing. Default uses the maximum available.'
} , )
lowerCamelCase : Optional[str] = field(
default='transformersbook/codeparrot' , metadata={'help': 'Folder or name of dataset to process.'} )
lowerCamelCase : Optional[str] = field(
        default='codeparrot-clean' , metadata={'help': 'Folder to save the processed dataset.'} )
lowerCamelCase : Optional[int] = field(
        default=10_00_00 , metadata={'help': 'Number of samples to save per JSON output file.'} )
lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase : Optional[float] = field(
default=10_00 , metadata={'help': 'Maximum line length in file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=1_00 , metadata={'help': 'Maximum mean line length in file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=0.25 , metadata={'help': 'Maximum fraction of non-alphanumeric characters, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=1.5 , metadata={'help': 'Minimum character token ratio for the file, otherwise file is filtered.'} )
lowerCamelCase : Optional[float] = field(
default=0.7 , metadata={'help': 'Probability for filtering config, test and uncommon files.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} , )
lowerCamelCase : Optional[bool] = field(
default=snake_case__ , metadata={'help': 'If True, near-duplicate samples are removed.'} )
lowerCamelCase : Optional[float] = field(
default=0.85 , metadata={'help': 'Jaccard threshold for near-duplicate samples.'} )
@dataclass
class TokenizerTrainingArguments:
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='gpt2' , metadata={'help': 'Base tokenizer to build new tokenizer from.'} )
lowerCamelCase : Optional[str] = field(
default='transformersbook/codeparrot-train' , metadata={'help': 'Dataset to train tokenizer on.'} )
lowerCamelCase : Optional[str] = field(default='content' , metadata={'help': 'Column containing text data to process.'} )
lowerCamelCase : Optional[int] = field(default=20_00_00 , metadata={'help': 'Number of examples to train tokenizer on.'} )
lowerCamelCase : Optional[int] = field(
        default=3_27_68 , metadata={'help': 'Target vocabulary size of the new tokenizer.'} )
lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of new tokenizer.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push saved tokenizer to the hub.'} )
@dataclass
class PretokenizationArguments:
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Name or path to the tokenizer.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot-clean-train' , metadata={'help': 'Name or path to the dataset to pretokenize.'} )
lowerCamelCase : Optional[str] = field(
default='tokenized-codeparrot-train' , metadata={'help': 'Repo name of the pretokenized data.'} )
lowerCamelCase : Optional[int] = field(default=snake_case__ , metadata={'help': 'Number of workers used for code evaluation.'} )
@dataclass
class InitializationArguments:
"""simple docstring"""
lowerCamelCase : Optional[str] = field(
default='gpt2-large' , metadata={'help': 'Configuration to use for model initialization.'} )
lowerCamelCase : Optional[str] = field(
default='codeparrot/codeparrot' , metadata={'help': 'Tokenizer attached to model.'} )
lowerCamelCase : Optional[str] = field(default='codeparrot' , metadata={'help': 'Name of the created model.'} )
lowerCamelCase : Optional[bool] = field(default=snake_case__ , metadata={'help': 'Push saved tokenizer to the hub.'} ) | 687 | 0 |
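# The dataclasses above are meant to be consumed with transformers'
# HfArgumentParser, which turns every field into a CLI flag. A minimal,
# self-contained sketch (DemoArguments is hypothetical):
from dataclasses import dataclass, field
from typing import Optional

from transformers import HfArgumentParser


@dataclass
class DemoArguments:
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate for training."})


(demo_args,) = HfArgumentParser(DemoArguments).parse_args_into_dataclasses(args=["--learning_rate", "5e-4"])
assert demo_args.learning_rate == 5e-4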
'''simple docstring'''
import argparse
import os
import re
lowercase__ : Optional[Any] = "src/diffusers"
# Pattern that looks at the indentation in a line.
lowercase__ : Any = re.compile(R"^(\s*)\S")
# Pattern that matches `"key":" and puts `key` in group 0.
lowercase__ : Tuple = re.compile(R"^\s*\"([^\"]+)\":")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
lowercase__ : str = re.compile(R"^\s*_import_structure\[\"([^\"]+)\"\]")
# Pattern that matches `"key",` and puts `key` in group 0.
lowercase__ : List[str] = re.compile(R"^\s*\"([^\"]+)\",\s*$")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
lowercase__ : Tuple = re.compile(R"\[([^\]]+)\]")
def __lowerCamelCase ( _UpperCamelCase : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ = _re_indent.search(lowercase__ )
return "" if search is None else search.groups()[0]
def __lowerCamelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : int="" , _UpperCamelCase : int=None , _UpperCamelCase : str=None ):
'''simple docstring'''
UpperCAmelCase_ = 0
UpperCAmelCase_ = code.split('''\n''' )
if start_prompt is not None:
while not lines[index].startswith(lowercase__ ):
index += 1
UpperCAmelCase_ = ['''\n'''.join(lines[:index] )]
else:
UpperCAmelCase_ = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
UpperCAmelCase_ = [lines[index]]
index += 1
while index < len(lowercase__ ) and (end_prompt is None or not lines[index].startswith(lowercase__ )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(lowercase__ ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + ''' ''' ):
current_block.append(lines[index] )
blocks.append('''\n'''.join(lowercase__ ) )
if index < len(lowercase__ ) - 1:
UpperCAmelCase_ = [lines[index + 1]]
index += 1
else:
UpperCAmelCase_ = []
else:
blocks.append('''\n'''.join(lowercase__ ) )
UpperCAmelCase_ = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(lowercase__ ) > 0:
blocks.append('''\n'''.join(lowercase__ ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(lowercase__ ):
blocks.append('''\n'''.join(lines[index:] ) )
return blocks
def __lowerCamelCase ( _UpperCamelCase : Dict ):
'''simple docstring'''
def _inner(_UpperCamelCase : int ):
return key(lowercase__ ).lower().replace('''_''' , '''''' )
return _inner
def __lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : str=None ):
'''simple docstring'''
def noop(_UpperCamelCase : List[Any] ):
return x
if key is None:
UpperCAmelCase_ = noop
# Constants are all uppercase, they go first.
UpperCAmelCase_ = [obj for obj in objects if key(lowercase__ ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
UpperCAmelCase_ = [obj for obj in objects if key(lowercase__ )[0].isupper() and not key(lowercase__ ).isupper()]
# Functions begin with a lowercase, they go last.
UpperCAmelCase_ = [obj for obj in objects if not key(lowercase__ )[0].isupper()]
UpperCAmelCase_ = ignore_underscore(lowercase__ )
return sorted(lowercase__ , key=lowercase__ ) + sorted(lowercase__ , key=lowercase__ ) + sorted(lowercase__ , key=lowercase__ )
def __lowerCamelCase ( _UpperCamelCase : Dict ):
'''simple docstring'''
def _replace(_UpperCamelCase : List[str] ):
UpperCAmelCase_ = match.groups()[0]
if "," not in imports:
return F"""[{imports}]"""
UpperCAmelCase_ = [part.strip().replace('''\"''' , '''''' ) for part in imports.split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
UpperCAmelCase_ = keys[:-1]
return "[" + ", ".join([F"""\"{k}\"""" for k in sort_objects(lowercase__ )] ) + "]"
UpperCAmelCase_ = import_statement.split('''\n''' )
if len(lowercase__ ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
UpperCAmelCase_ = 2 if lines[1].strip() == '''[''' else 1
UpperCAmelCase_ = [(i, _re_strip_line.search(lowercase__ ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
UpperCAmelCase_ = sort_objects(lowercase__ , key=lambda _UpperCamelCase : x[1] )
UpperCAmelCase_ = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(lowercase__ ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
UpperCAmelCase_ = _re_bracket_content.sub(_replace , lines[1] )
else:
UpperCAmelCase_ = [part.strip().replace('''\"''' , '''''' ) for part in lines[1].split(''',''' )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
UpperCAmelCase_ = keys[:-1]
UpperCAmelCase_ = get_indent(lines[1] ) + ''', '''.join([F"""\"{k}\"""" for k in sort_objects(lowercase__ )] )
return "\n".join(lowercase__ )
else:
# Finally we have to deal with imports fitting on one line
UpperCAmelCase_ = _re_bracket_content.sub(_replace , lowercase__ )
return import_statement
def __lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Dict=True ):
'''simple docstring'''
with open(lowercase__ , '''r''' ) as f:
UpperCAmelCase_ = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
UpperCAmelCase_ = split_code_in_indented_blocks(
lowercase__ , start_prompt='''_import_structure = {''' , end_prompt='''if TYPE_CHECKING:''' )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(lowercase__ ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
UpperCAmelCase_ = main_blocks[block_idx]
UpperCAmelCase_ = block.split('''\n''' )
# Get to the start of the imports.
UpperCAmelCase_ = 0
while line_idx < len(lowercase__ ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
UpperCAmelCase_ = len(lowercase__ )
else:
line_idx += 1
if line_idx >= len(lowercase__ ):
continue
# Ignore beginning and last line: they don't contain anything.
UpperCAmelCase_ = '''\n'''.join(block_lines[line_idx:-1] )
UpperCAmelCase_ = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
UpperCAmelCase_ = split_code_in_indented_blocks(lowercase__ , indent_level=lowercase__ )
# We have two categories of import key: list or _import_structure[key].append/extend
UpperCAmelCase_ = _re_direct_key if '''_import_structure''' in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
UpperCAmelCase_ = [(pattern.search(lowercase__ ).groups()[0] if pattern.search(lowercase__ ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
UpperCAmelCase_ = [(i, key) for i, key in enumerate(lowercase__ ) if key is not None]
UpperCAmelCase_ = [x[0] for x in sorted(lowercase__ , key=lambda _UpperCamelCase : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
UpperCAmelCase_ = 0
UpperCAmelCase_ = []
for i in range(len(lowercase__ ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
UpperCAmelCase_ = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(lowercase__ )
count += 1
# And we put our main block back together with its first and last line.
UpperCAmelCase_ = '''\n'''.join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(lowercase__ ):
if check_only:
return True
else:
print(F"""Overwriting {file}.""" )
with open(lowercase__ , '''w''' ) as f:
f.write('''\n'''.join(lowercase__ ) )
def __lowerCamelCase ( _UpperCamelCase : Optional[Any]=True ):
'''simple docstring'''
UpperCAmelCase_ = []
for root, _, files in os.walk(lowercase__ ):
if "__init__.py" in files:
UpperCAmelCase_ = sort_imports(os.path.join(lowercase__ , '''__init__.py''' ) , check_only=lowercase__ )
if result:
UpperCAmelCase_ = [os.path.join(lowercase__ , '''__init__.py''' )]
if len(lowercase__ ) > 0:
raise ValueError(F"""Would overwrite {len(lowercase__ )} files, run `make style`.""" )
if __name__ == "__main__":
lowercase__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
lowercase__ : str = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
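# Standalone illustration of the ordering rule the sorter enforces inside each
# import block: ALL_CAPS constants first, then classes, then functions, each
# sorted alphabetically with underscores ignored. (`_demo_sort` is a simplified
# re-statement, not one of the functions above.)
def _demo_sort(objects):
    key = lambda s: s.lower().replace("_", "")
    constants = [o for o in objects if o.isupper()]
    classes = [o for o in objects if o[0].isupper() and not o.isupper()]
    functions = [o for o in objects if not o[0].isupper()]
    return sorted(constants, key=key) + sorted(classes, key=key) + sorted(functions, key=key)


assert _demo_sort(["load_x", "BModel", "A_MAP", "a_helper"]) == ["A_MAP", "BModel", "a_helper", "load_x"]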
| 719 | '''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
# NOTE: function names reconstructed; the original identifiers were renamed away.
def freeze_params(module):
    """Disable gradient updates for every parameter of `module`."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    """Pick the best available torch device, with a warning for MPS."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    """Display an image with matplotlib, hiding both axes."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    """Return the current time formatted as HH:MM:SS."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
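# Usage sketch for the reconstructed helpers above: freeze a small module and
# verify that none of its parameters will receive gradients.
_layer = torch.nn.Linear(4, 4)
freeze_params(_layer)
assert all(not p.requires_grad for p in _layer.parameters())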
| 43 | 0 |
import argparse
from collections import defaultdict
import yaml
__lowercase = '''docs/source/en/_toctree.yml'''
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCamelCase :Optional[int] = defaultdict(__UpperCAmelCase )
for doc in model_doc:
counts[doc["local"]] += 1
__UpperCamelCase :Any = [key for key, value in counts.items() if value > 1]
__UpperCamelCase :int = []
for duplicate_key in duplicates:
__UpperCamelCase :Union[str, Any] = list({doc['''title'''] for doc in model_doc if doc['''local'''] == duplicate_key} )
if len(__UpperCAmelCase ) > 1:
raise ValueError(
f"""{duplicate_key} is present several times in the documentation table of content at """
'''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '''
'''others.''' )
# Only add this once
new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in model_doc if counts[doc['''local''']] == 1] )
# Sort
return sorted(__UpperCAmelCase , key=lambda SCREAMING_SNAKE_CASE : s["title"].lower() )
def lowerCamelCase ( SCREAMING_SNAKE_CASE=False ):
'''simple docstring'''
with open(__UpperCAmelCase , encoding='''utf-8''' ) as f:
__UpperCamelCase :int = yaml.safe_load(f.read() )
# Get to the API doc
__UpperCamelCase :Union[str, Any] = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__UpperCamelCase :int = content[api_idx]['''sections''']
# Then to the model doc
__UpperCamelCase :int = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
__UpperCamelCase :Optional[int] = api_doc[model_idx]['''sections''']
__UpperCamelCase :Dict = [(idx, section) for idx, section in enumerate(__UpperCAmelCase ) if '''sections''' in section]
__UpperCamelCase :Optional[int] = False
for idx, modality_doc in modalities_docs:
__UpperCamelCase :List[str] = modality_doc['''sections''']
__UpperCamelCase :Dict = clean_model_doc_toc(__UpperCAmelCase )
if old_modality_doc != new_modality_doc:
__UpperCamelCase :str = True
if overwrite:
__UpperCamelCase :Optional[int] = new_modality_doc
if diff:
if overwrite:
__UpperCamelCase :List[str] = model_doc
__UpperCamelCase :Optional[Any] = api_doc
with open(__UpperCAmelCase , '''w''' , encoding='''utf-8''' ) as f:
f.write(yaml.dump(__UpperCAmelCase , allow_unicode=__UpperCAmelCase ) )
else:
raise ValueError(
'''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' )
if __name__ == "__main__":
__lowercase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
__lowercase = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 167 | """simple docstring"""
import math
def solution(n: int = 1_0_0) -> int:
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(f'''{solution() = }''')
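# Closed-form check: sum(i) = n(n+1)/2 and sum(i**2) = n(n+1)(2n+1)/6, so the
# difference can be computed without a loop.
def solution_closed_form(n: int = 1_0_0) -> int:
    return (n * (n + 1) // 2) ** 2 - n * (n + 1) * (2 * n + 1) // 6


assert solution_closed_form(1_0_0) == solution(1_0_0) == 25164150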
| 586 | 0 |
SCREAMING_SNAKE_CASE = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
SCREAMING_SNAKE_CASE = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
SCREAMING_SNAKE_CASE = {
0: 'Sunday',
1: 'Monday',
2: 'Tuesday',
3: 'Wednesday',
4: 'Thursday',
5: 'Friday',
6: 'Saturday',
}
def get_week_day(year: int, month: int, day: int) -> str:
    """Doomsday algorithm: return the week-day name for a Gregorian date.
    (Function and variable names reconstructed; the originals were renamed away.)
    """
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        # non-leap years: not divisible by 4, or a century year not divisible by 400
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod()
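# Spot checks for the reconstructed get_week_day: 2020-10-24 was a Saturday,
# and 2000-02-29 exercises the 400-year leap rule.
assert get_week_day(2020, 10, 24) == "Saturday"
assert get_week_day(2000, 2, 29) == "Tuesday"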
| 209 |
from math import pow
def backtrack(needed_sum, power, current_number, current_sum, solutions_count):
    """Count representations of `needed_sum` as a sum of distinct powers
    current_number**power, (current_number + 1)**power, ...
    (Names reconstructed from the recursive calls below.)
    """
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum, power):
    """Return how many ways `needed_sum` can be written as a sum of distinct
    natural numbers raised to `power`."""
    if not (1 <= needed_sum <= 1_000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count
if __name__ == "__main__":
import doctest
doctest.testmod()
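# Examples for the reconstructed solve(): 13 = 2^2 + 3^2 is the only way to
# write 13 as a sum of distinct squares; 100 has three such representations
# (10^2, 6^2 + 8^2, 1^2 + 3^2 + 4^2 + 5^2 + 7^2) and one as distinct cubes.
assert solve(13, 2) == 1
assert solve(100, 2) == 3
assert solve(100, 3) == 1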
| 209 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    # NOTE: class, attribute, and method names reconstructed from their usages below.

    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, apply_ocr=True):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMvaImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMvaImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMvaImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
def a_ ( self ):
snake_case = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__snake_case , '''do_resize''' ) )
self.assertTrue(hasattr(__snake_case , '''size''' ) )
self.assertTrue(hasattr(__snake_case , '''apply_ocr''' ) )
def a_ ( self ):
snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''height''': 1_8, '''width''': 1_8} )
snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {'''height''': 4_2, '''width''': 4_2} )
def a_ ( self ):
pass
def a_ ( self ):
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , Image.Image )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' )
self.assertEqual(
encoding.pixel_values.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
self.assertIsInstance(encoding.words , __snake_case )
self.assertIsInstance(encoding.boxes , __snake_case )
# Test batched
snake_case = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def a_ ( self ):
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , np.ndarray )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
snake_case = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def a_ ( self ):
# Initialize image_processing
snake_case = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
for image in image_inputs:
self.assertIsInstance(__snake_case , torch.Tensor )
# Test not batched input
snake_case = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
# Test batched
snake_case = image_processing(__snake_case , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.size['''height'''],
self.image_processor_tester.size['''width'''],
) , )
def a_ ( self ):
# with apply_OCR = True
snake_case = LayoutLMvaImageProcessor()
from datasets import load_dataset
snake_case = load_dataset('''hf-internal-testing/fixtures_docvqa''' , split='''test''' )
snake_case = Image.open(ds[0]['''file'''] ).convert('''RGB''' )
snake_case = image_processing(__snake_case , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
# fmt: off
# the words and boxes were obtained with Tesseract 4.1.1
snake_case = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
snake_case = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], 
[4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]] # noqa: E231
# fmt: on
self.assertListEqual(encoding.words , __snake_case )
self.assertListEqual(encoding.boxes , __snake_case )
# with apply_OCR = False
snake_case = LayoutLMvaImageProcessor(apply_ocr=__snake_case )
snake_case = image_processing(__snake_case , return_tensors='''pt''' )
self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
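# LayoutLM-family word boxes (like the expected values above) live on a
# 0-1000 grid. A common normalization helper, shown as an illustrative sketch:
def normalize_box(box, width, height):
    return [
        int(1000 * box[0] / width),
        int(1000 * box[1] / height),
        int(1000 * box[2] / width),
        int(1000 * box[3] / height),
    ]


assert normalize_box([10, 20, 30, 40], 200, 400) == [50, 50, 150, 100]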
| 550 |
import os
from distutils.util import strtobool
# NOTE: function names reconstructed; the original identifiers were renamed away.
def get_int_from_env(env_keys, default):
    """Return the first non-negative int found in `env_keys`, else `default`."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
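# Usage sketch (function names as reconstructed above):
os.environ["DEMO_FLAG"] = "true"
assert parse_flag_from_env("DEMO_FLAG") is True  # strtobool accepts y/yes/t/true/on/1
assert get_int_from_env(["UNSET_A", "UNSET_B"], default=7) == 7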
| 550 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_chinese_clip""": [
"""CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ChineseCLIPConfig""",
"""ChineseCLIPOnnxConfig""",
"""ChineseCLIPTextConfig""",
"""ChineseCLIPVisionConfig""",
],
"""processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_chinese_clip"] = [
"""CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ChineseCLIPModel""",
"""ChineseCLIPPreTrainedModel""",
"""ChineseCLIPTextModel""",
"""ChineseCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 718 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
UpperCamelCase_ = logging.getLogger(__name__)
UpperCamelCase_ = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config",
        type=str,
        default="roberta-base",
        help="The model config to use. Note that we don't copy the model's weights, only the config!",
    )
    parser.add_argument(
        "--tokenizer",
        type=str,
        default="unigram-tokenizer-wikitext",
        help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.",
    )
    parser.add_argument(
        "--per_replica_batch_size",
        type=int,
        default=8,
        help="Batch size per TPU core.",
    )
    parser.add_argument(
        "--no_tpu",
        action="store_true",
        help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.",
    )
    parser.add_argument(
        "--tpu_name",
        type=str,
        help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.",
        default="local",
    )
    parser.add_argument(
        "--tpu_zone",
        type=str,
        help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.",
    )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes."
    )
    parser.add_argument(
        "--bfloat16",
        action="store_true",
        help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.",
    )
    parser.add_argument(
        "--train_dataset",
        type=str,
        help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--shuffle_buffer_size",
        type=int,
        default=2**18,
        help="Size of the shuffle buffer (in samples)",
    )
    parser.add_argument(
        "--eval_dataset",
        type=str,
        help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=1,
        help="Number of epochs to train for.",
    )
    parser.add_argument(
        "--learning_rate",
        type=float,
        default=1e-4,
        help="Learning rate to use for training.",
    )
    parser.add_argument(
        "--weight_decay_rate",
        type=float,
        default=1e-3,
        help="Weight decay rate to use for training.",
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=512,
        help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py",
    )
    parser.add_argument(
        "--mlm_probability",
        type=float,
        default=0.15,
        help="Fraction of tokens to mask during training.",
    )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")

    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project
            )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local."
        )

    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count

    return num_samples
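
# Illustrative note (added): the regex above assumes shard filenames like
# "wikitext-0-2048.tfrecord", where the trailing number is the number of samples
# in the shard, so count_samples(["gs://bucket/wikitext-0-2048.tfrecord"]) -> 2048.
# That naming scheme is inherited from prepare_tfrecord_shards.py (see --max_length help).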
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")

    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")

    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size

    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")

    num_train_samples = count_samples(training_records)

    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs

    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps,
            num_warmup_steps=total_train_steps // 20,
            init_lr=args.learning_rate,
            weight_decay_rate=args.weight_decay_rate,
        )

        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf"
    )

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"],
            vocab_size=len(tokenizer),
            mask_token_id=tokenizer.mask_token_id,
            special_tokens_mask=special_tokens_mask,
        )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync

    train_dataset = prepare_dataset(
        training_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=True,
        shuffle_buffer_size=args.shuffle_buffer_size,
    )

    eval_dataset = prepare_dataset(
        eval_records,
        decode_fn=decode_fn,
        mask_fn=mask_with_collator,
        batch_size=batch_size,
        shuffle=False,
    )

    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer)
        )

    model.fit(
        train_dataset,
        validation_data=eval_dataset,
        epochs=args.num_epochs,
        callbacks=callbacks,
    )

    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
UpperCamelCase_ = parse_args()
main(args)
| 88 | 0 |
'''simple docstring'''
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Apply the logistic sigmoid 1 / (1 + e^-x) elementwise."""
    return 1 / (1 + np.exp(-vector))
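
# Example (added for illustration): the function is vectorized, so it accepts
# arrays as well as scalars.
# >>> sigmoid(np.array([-1.0, 0.0, 2.0]))
# array([0.26894142, 0.5       , 0.88079708])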
if __name__ == "__main__":
import doctest
doctest.testmod()
| 120 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    '''simple docstring'''

    model_type = "sew"
    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
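
    # Minimal usage sketch (added): with the default strides above (one 5 and six
    # 2s among the 1s) the feature encoder downsamples the waveform by a factor of
    # 5 * 2**6 = 320, which is what `inputs_to_logits_ratio` reports:
    #
    #     config = SEWConfig()
    #     assert config.inputs_to_logits_ratio == 320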
| 120 | 1 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechT5HifiGan, SpeechT5HifiGanConfig, logging


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
def load_weights(checkpoint, hf_model, config):
    # Copy the original HiFi-GAN weights onto the SpeechT5HifiGan module layout
    # (conv_pre / upsampler / resblocks / conv_post).
    hf_model.apply_weight_norm()

    hf_model.conv_pre.weight_g.data = checkpoint["input_conv.weight_g"]
    hf_model.conv_pre.weight_v.data = checkpoint["input_conv.weight_v"]
    hf_model.conv_pre.bias.data = checkpoint["input_conv.bias"]

    for i in range(len(config.upsample_rates)):
        hf_model.upsampler[i].weight_g.data = checkpoint[f"upsamples.{i}.1.weight_g"]
        hf_model.upsampler[i].weight_v.data = checkpoint[f"upsamples.{i}.1.weight_v"]
        hf_model.upsampler[i].bias.data = checkpoint[f"upsamples.{i}.1.bias"]

    for i in range(len(config.upsample_rates) * len(config.resblock_kernel_sizes)):
        for j in range(len(config.resblock_dilation_sizes)):
            hf_model.resblocks[i].convs1[j].weight_g.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_g"]
            hf_model.resblocks[i].convs1[j].weight_v.data = checkpoint[f"blocks.{i}.convs1.{j}.1.weight_v"]
            hf_model.resblocks[i].convs1[j].bias.data = checkpoint[f"blocks.{i}.convs1.{j}.1.bias"]

            hf_model.resblocks[i].convs2[j].weight_g.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_g"]
            hf_model.resblocks[i].convs2[j].weight_v.data = checkpoint[f"blocks.{i}.convs2.{j}.1.weight_v"]
            hf_model.resblocks[i].convs2[j].bias.data = checkpoint[f"blocks.{i}.convs2.{j}.1.bias"]

    hf_model.conv_post.weight_g.data = checkpoint["output_conv.1.weight_g"]
    hf_model.conv_post.weight_v.data = checkpoint["output_conv.1.weight_v"]
    hf_model.conv_post.bias.data = checkpoint["output_conv.1.bias"]

    hf_model.remove_weight_norm()
@torch.no_grad()
def convert_hifigan_checkpoint(
    checkpoint_path,
    stats_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = SpeechT5HifiGanConfig.from_pretrained(config_path)
    else:
        config = SpeechT5HifiGanConfig()

    model = SpeechT5HifiGan(config)

    orig_checkpoint = torch.load(checkpoint_path)
    load_weights(orig_checkpoint["model"]["generator"], model, config)

    stats = np.load(stats_path)
    mean = stats[0].reshape(-1)
    scale = stats[1].reshape(-1)
    model.mean = torch.from_numpy(mean).float()
    model.scale = torch.from_numpy(scale).float()

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 377 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_no_bos.model")
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/pegasus-large")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")
    def test_convert_token_and_id(self):
        token = "</s>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "</s>")
        self.assertEqual(vocab_keys[-1], "v")
        self.assertEqual(len(vocab_keys), 1103)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1103)

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"
            " </s> <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)

    def test_large_mask_tokens(self):
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = "<mask_1> To ensure a <mask_2> flow of bank resolutions."
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
    def test_large_tokenizer_settings(self):
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = "To ensure a smooth flow of bank resolutions."
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str], return_tensors=None).input_ids[0]
        self.assertListEqual(desired_result, ids)
        assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3]) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]

    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 150, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_a = {'''input_ids''': [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
        # fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_a,  # the expected encoding defined above
            model_name="google/bigbird-pegasus-large-arxiv",
            revision="ba85d0851d708441f91440d509690f1ab6353415",
        )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB, offset=0, mask_token_sent=None, mask_token="[MASK]")
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def _large_tokenizer(self):
        return PegasusTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv")

    def get_tokenizer(self, **kwargs) -> PegasusTokenizer:
        return PegasusTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return ("This is a test", "This is a test")

    def test_mask_tokens_rust_pegasus(self):
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname)
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname)
        raw_input_str = (
            "Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"
            " <pad> <pad> <pad>"
        )
        rust_ids = rust_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        py_ids = py_tokenizer([raw_input_str], return_tensors=None, add_special_tokens=False).input_ids[0]
        self.assertListEqual(py_ids, rust_ids)
    @require_torch
    def test_large_seq2seq_truncation(self):
        src_texts = ["This is going to be way too long." * 1000, "short example"]
        tgt_texts = ["not super long but more than 5 tokens", "tiny"]
        batch = self._large_tokenizer(src_texts, padding=True, truncation=True, return_tensors="pt")
        targets = self._large_tokenizer(
            text_target=tgt_texts, max_length=5, padding=True, truncation=True, return_tensors="pt"
        )

        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch) == 2  # input_ids, attention_mask.

    def test_equivalence_to_orig_tokenizer(self):
        """simple docstring"""
        test_string = (
            "This is an example string that is used to test the original TF implementation against the HF"
            " implementation"
        )

        token_ids = self._large_tokenizer(test_string).input_ids

        self.assertListEqual(
            token_ids,
            [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1],
        )
| 377 | 1 |
"""simple docstring"""
from tempfile import TemporaryDirectory
from unittest import TestCase
from unittest.mock import MagicMock, patch
from transformers import AutoModel, TFAutoModel
from transformers.onnx import FeaturesManager
from transformers.testing_utils import SMALL_MODEL_IDENTIFIER, require_tf, require_torch
@require_torch
@require_tf
class DetermineFrameworkTest(TestCase):
    """simple docstring"""

    def setUp(self):
        self.test_model = SMALL_MODEL_IDENTIFIER
        self.framework_pt = "pt"
        self.framework_tf = "tf"

    def _setup_pt_ckpt(self, save_dir):
        model_pt = AutoModel.from_pretrained(self.test_model)
        model_pt.save_pretrained(save_dir)

    def _setup_tf_ckpt(self, save_dir):
        model_tf = TFAutoModel.from_pretrained(self.test_model, from_pt=True)
        model_tf.save_pretrained(save_dir)

    def test_framework_provided(self):
        mock_framework = "mock_framework"

        # Framework provided - return whatever the user provides
        framework = FeaturesManager.determine_framework(self.test_model, mock_framework)
        self.assertEqual(framework, mock_framework)

        # Local checkpoint and framework provided - return provided framework
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt, mock_framework)
            self.assertEqual(framework, mock_framework)
    def test_checkpoint_provided(self):
        # PyTorch checkpoint
        with TemporaryDirectory() as local_pt_ckpt:
            self._setup_pt_ckpt(local_pt_ckpt)
            framework = FeaturesManager.determine_framework(local_pt_ckpt)
            self.assertEqual(framework, self.framework_pt)

        # TensorFlow checkpoint
        with TemporaryDirectory() as local_tf_ckpt:
            self._setup_tf_ckpt(local_tf_ckpt)
            framework = FeaturesManager.determine_framework(local_tf_ckpt)
            self.assertEqual(framework, self.framework_tf)

        # Invalid local checkpoint
        with TemporaryDirectory() as local_invalid_ckpt:
            with self.assertRaises(FileNotFoundError):
                FeaturesManager.determine_framework(local_invalid_ckpt)

    def test_from_environment(self):
        # TensorFlow not in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # PyTorch not in environment -> use TensorFlow
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_torch_available", mock_torch_available):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_tf)

        # Both in environment -> use PyTorch
        mock_tf_available = MagicMock(return_value=True)
        mock_torch_available = MagicMock(return_value=True)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            framework = FeaturesManager.determine_framework(self.test_model)
            self.assertEqual(framework, self.framework_pt)

        # Both not in environment -> raise error
        mock_tf_available = MagicMock(return_value=False)
        mock_torch_available = MagicMock(return_value=False)
        with patch("transformers.onnx.features.is_tf_available", mock_tf_available), patch(
            "transformers.onnx.features.is_torch_available", mock_torch_available
        ):
            with self.assertRaises(EnvironmentError):
                FeaturesManager.determine_framework(self.test_model)
| 93 |
"""simple docstring"""
import re
def split_input(str_: str) -> list:
    """Split the string into words on any character that is not alphanumeric or whitespace."""
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(
        ["".join([char.capitalize() for char in sub_str]) for sub_str in string_split]
    )


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ]
            )
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
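
# Examples (added for illustration):
# >>> to_pascal_case("here comes the sun")
# 'HereComesTheSun'
# >>> to_snake_case("here comes the sun", upper=False)
# 'here_comes_the_sun'
# >>> to_kebab_case("here comes the sun", upper=True)
# 'HERE-COMES-THE-SUN'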
if __name__ == "__main__":
__import__("""doctest""").testmod()
| 93 | 1 |
"""simple docstring"""
def hamming_distance(string1: str, string2: str) -> int:
    """Count the positions at which two equal-length strings differ."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")

    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1

    return count
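
# Example (added): "karolin" and "kathrin" differ at three positions.
# >>> hamming_distance("karolin", "kathrin")
# 3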
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 705 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_bigbird_pegasus": [
        "BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BigBirdPegasusConfig",
        "BigBirdPegasusOnnxConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
        "BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BigBirdPegasusForCausalLM",
        "BigBirdPegasusForConditionalGeneration",
        "BigBirdPegasusForQuestionAnswering",
        "BigBirdPegasusForSequenceClassification",
        "BigBirdPegasusModel",
        "BigBirdPegasusPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 370 | 0 |
def kinetic_energy(mass: float, velocity: float) -> float:
    """Return the kinetic energy 0.5 * m * v^2 of a body (joules for SI inputs)."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
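
# Example (added): a 10 kg body moving at 5 m/s carries 0.5 * 10 * 5**2 = 125 J.
# >>> kinetic_energy(10, 5)
# 125.0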
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 463 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """A batched, differentiable pinhole camera."""

    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]

    def __post_init__(self):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape) == len(self.y.shape) == len(self.z.shape) == len(self.origin.shape) == 2

    def resolution(self):
        return torch.from_numpy(np.array([self.width, self.height], dtype=np.float32))

    def fov(self):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov], dtype=np.float32))

    def get_image_coords(self) -> torch.Tensor:
        pixel_indices = torch.arange(self.height * self.width)
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices, self.width, rounding_mode="trunc"),
            ],
            axis=1,
        )
        return coords

    @property
    def camera_rays(self):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape))

        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0), [batch_size * inner_batch_size, *coords.shape])
        rays = self.get_camera_rays(coords)

        rays = rays.view(batch_size, inner_batch_size * self.height * self.width, 2, 3)

        return rays

    def get_camera_rays(self, coords: torch.Tensor) -> torch.Tensor:
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]

        flat = coords.view(batch_size, -1, 2)

        res = self.resolution()
        fov = self.fov()

        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2)

        fracs = fracs.view(batch_size, -1, 2)
        directions = (
            self.z.view(batch_size, 1, 3)
            + self.x.view(batch_size, 1, 3) * fracs[:, :, :1]
            + self.y.view(batch_size, 1, 3) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1, keepdim=True)
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size, 1, 3), [batch_size, directions.shape[1], 3]),
                directions,
            ],
            dim=2,
        )
        return rays.view(batch_size, *shape, 2, 3)

    def resize_image(self, width: int, height: int) -> "DifferentiableProjectiveCamera":
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin,
            x=self.x,
            y=self.y,
            z=self.z,
            width=width,
            height=height,
            x_fov=self.x_fov,
            y_fov=self.y_fov,
            shape=self.shape,
        )


def create_pan_cameras(size: int) -> DifferentiableProjectiveCamera:
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0, 2 * np.pi, num=20):
        z = np.array([np.sin(theta), np.cos(theta), -0.5])
        z /= np.sqrt(np.sum(z**2))
        origin = -z * 4
        x = np.array([np.cos(theta), -np.sin(theta), 0.0])
        y = np.cross(z, x)
        origins.append(origin)
        xs.append(x)
        ys.append(y)
        zs.append(z)
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins, axis=0)).float(),
        x=torch.from_numpy(np.stack(xs, axis=0)).float(),
        y=torch.from_numpy(np.stack(ys, axis=0)).float(),
        z=torch.from_numpy(np.stack(zs, axis=0)).float(),
        width=size,
        height=size,
        x_fov=0.7,
        y_fov=0.7,
        shape=(1, len(xs)),
    )
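
# Usage sketch (added): create_pan_cameras(64) yields 20 poses orbiting the origin.
# With shape=(1, 20), `camera_rays` then has shape (1, 20 * 64 * 64, 2, 3), packing
# one (origin, direction) pair per pixel per pose.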
| 463 | 1 |
"""simple docstring"""
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    '''simple docstring'''

    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, 'timm')
        super().__init__(config)
        self.config = config

        if config.backbone is None:
            raise ValueError('backbone is not set in the config. Please set it to a timm model name.')

        if config.backbone not in timm.list_models():
            raise ValueError(f"""backbone {config.backbone} is not supported by timm.""")

        if hasattr(config, 'out_features') and config.out_features is not None:
            raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.')

        pretrained = getattr(config, 'use_pretrained_backbone', None)
        if pretrained is None:
            raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.')

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, 'out_indices', None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )

        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer['module']: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ['vision', 'timm'])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop('config', TimmBackboneConfig())

        use_timm = kwargs.pop('use_timm_backbone', True)
        if not use_timm:
            raise ValueError('use_timm_backbone must be True for timm backbones')

        num_channels = kwargs.pop('num_channels', config.num_channels)
        features_only = kwargs.pop('features_only', config.features_only)
        use_pretrained_backbone = kwargs.pop('use_pretrained_backbone', config.use_pretrained_backbone)
        out_indices = kwargs.pop('out_indices', config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        pass

    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError('Cannot output attentions for timm backbones at the moment')

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
| 302 |
"""simple docstring"""
import sys
def matrix_chain_order(array):
    n = len(array)
    matrix = [[0 for x in range(n)] for x in range(n)]
    sol = [[0 for x in range(n)] for x in range(n)]

    for chain_length in range(2, n):
        for a in range(1, n - chain_length + 1):
            b = a + chain_length - 1

            matrix[a][b] = sys.maxsize
            for c in range(a, b):
                cost = (
                    matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
                )
                if cost < matrix[a][b]:
                    matrix[a][b] = cost
                    sol[a][b] = c
    return matrix, sol


def print_optimal_solution(optimal_solution, i, j):
    if i == j:
        print("A" + str(i), end=" ")
    else:
        print("(", end=" ")
        print_optimal_solution(optimal_solution, i, optimal_solution[i][j])
        print_optimal_solution(optimal_solution, optimal_solution[i][j] + 1, j)
        print(")", end=" ")


def main():
    array = [30, 35, 15, 5, 10, 20, 25]
    n = len(array)
    # Size of matrix created from above array will be
    # 30*35 35*15 15*5 5*10 10*20 20*25
    matrix, optimal_solution = matrix_chain_order(array)

    print("No. of operations required: " + str(matrix[1][n - 1]))
    print_optimal_solution(optimal_solution, 1, n - 1)


if __name__ == "__main__":
    main()
| 302 | 1 |
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """Return the maximum sum obtainable from non-adjacent elements of ``nums``."""
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
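
# Example (added): for [1, 5, 3, 7, 2, 2, 6] the best non-adjacent picks are
# 5 + 7 + 6 = 18.
# >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
# 18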
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 32 |
"""simple docstring"""
__all__ = [
"Audio",
"Array2D",
"Array3D",
"Array4D",
"Array5D",
"ClassLabel",
"Features",
"Sequence",
"Value",
"Image",
"Translation",
"TranslationVariableLanguages",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 602 | 0 |
'''simple docstring'''
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class DecodeType(ExplicitEnum):
    CHARACTER = "char"
    BPE = "bpe"
    WORDPIECE = "wp"


SUPPORTED_DECODE_TYPES = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)


class MgpstrProcessor(ProcessorMixin):
    attributes = ["image_processor", "char_tokenizer"]

    image_processor_class = "ViTImageProcessor"
    char_tokenizer_class = "MgpstrTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('''feature_extractor''')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')

        self.char_tokenizer = tokenizer
        self.bpe_tokenizer = AutoTokenizer.from_pretrained('''gpt2''')
        self.wp_tokenizer = AutoTokenizer.from_pretrained('''bert-base-uncased''')

        super().__init__(image_processor, tokenizer)
def __call__( self : Optional[Any] , _A : Any=None , _A : Dict=None , _A : int=None , **_A : str ) -> str:
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
UpperCAmelCase_ : Optional[Any] = self.image_processor(_A , return_tensors=_A , **_A )
if text is not None:
UpperCAmelCase_ : Optional[Any] = self.char_tokenizer(_A , return_tensors=_A , **_A )
if text is None:
return inputs
elif images is None:
return encodings
else:
UpperCAmelCase_ : int = encodings['''input_ids''']
return inputs
def A ( self : Optional[int] , _A : List[Any] ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Tuple = sequences
UpperCAmelCase_ : Dict = char_preds.size(0 )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = self._decode_helper(_A , '''char''' )
UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = self._decode_helper(_A , '''bpe''' )
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self._decode_helper(_A , '''wp''' )
UpperCAmelCase_ : int = []
UpperCAmelCase_ : int = []
for i in range(_A ):
UpperCAmelCase_ : List[Any] = [char_scores[i], bpe_scores[i], wp_scores[i]]
UpperCAmelCase_ : str = [char_strs[i], bpe_strs[i], wp_strs[i]]
UpperCAmelCase_ : List[str] = scores.index(max(_A ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
UpperCAmelCase_ : int = {}
UpperCAmelCase_ : Tuple = final_strs
UpperCAmelCase_ : int = final_scores
UpperCAmelCase_ : Union[str, Any] = char_strs
UpperCAmelCase_ : int = bpe_strs
UpperCAmelCase_ : List[Any] = wp_strs
return out
    def _decode_helper(self, pred_logits, format):
        if format == DecodeType.CHARACTER:
            decoder = self.char_decode
            eos_token = 1
            eos_str = '''[s]'''
        elif format == DecodeType.BPE:
            decoder = self.bpe_decode
            eos_token = 2
            eos_str = '''#'''
        elif format == DecodeType.WORDPIECE:
            decoder = self.wp_decode
            eos_token = 102
            eos_str = '''[SEP]'''
        else:
            raise ValueError(F"Format {format} is not supported.")

        dec_strs, conf_scores = [], []
        batch_size = pred_logits.size(0)
        batch_max_length = pred_logits.size(1)
        _, preds_index = pred_logits.topk(1, dim=-1, largest=True, sorted=True)
        preds_index = preds_index.view(-1, batch_max_length)[:, 1:]
        preds_str = decoder(preds_index)
        preds_max_prob, _ = torch.nn.functional.softmax(pred_logits, dim=2).max(dim=2)
        preds_max_prob = preds_max_prob[:, 1:]

        for index in range(batch_size):
            pred_eos = preds_str[index].find(eos_str)
            pred = preds_str[index][:pred_eos]
            pred_index = preds_index[index].cpu().tolist()
            pred_eos_index = pred_index.index(eos_token) if eos_token in pred_index else -1
            pred_max_prob = preds_max_prob[index][: pred_eos_index + 1]
            confidence_score = pred_max_prob.cumprod(dim=0)[-1] if pred_max_prob.nelement() != 0 else 0.0
            dec_strs.append(pred)
            conf_scores.append(confidence_score)

        return dec_strs, conf_scores

    def char_decode(self, sequences):
        decode_strs = [seq.replace(''' ''', '''''') for seq in self.char_tokenizer.batch_decode(sequences)]
        return decode_strs

    def bpe_decode(self, sequences):
        return self.bpe_tokenizer.batch_decode(sequences)

    def wp_decode(self, sequences):
        decode_strs = [seq.replace(''' ''', '''''') for seq in self.wp_tokenizer.batch_decode(sequences)]
        return decode_strs
| 216 |
'''simple docstring'''
ROMAN = [
(1_000, 'M'),
(900, 'CM'),
(500, 'D'),
(400, 'CD'),
(100, 'C'),
(90, 'XC'),
(50, 'L'),
(40, 'XL'),
(10, 'X'),
(9, 'IX'),
(5, 'V'),
(4, 'IV'),
(1, 'I'),
]
def roman_to_int(roman: str) -> int:
    """Convert a roman numeral to an integer, e.g. "XLII" -> 42."""
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total
def int_to_roman(number: int) -> str:
    """Convert an integer to a roman numeral, e.g. 42 -> "XLII"."""
    result = []
    for arabic, roman in ROMAN:
        (factor, number) = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 216 | 1 |
'''simple docstring'''
from typing import List, Optional
import numpy as np
from ...processing_utils import ProcessorMixin
from ...utils import to_numpy
class MusicgenProcessor(ProcessorMixin):
    """simple docstring"""

    feature_extractor_class = """EncodecFeatureExtractor"""
    tokenizer_class = ("""T5Tokenizer""", """T5TokenizerFast""")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)

        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)

        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]

        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask=None):
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)

        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)

        return audio_values
| 330 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def convert(src_path: str, map_location: str = "cpu", save_path: Union[str, None] = None) -> None:
    """Convert a saved state dict (e.g. pytorch_model.bin) to float16, in place or at save_path."""
    state_dict = torch.load(src_path, map_location=map_location)
    for k, v in tqdm(state_dict.items()):
        if not isinstance(v, torch.Tensor):
            raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin")
        state_dict[k] = v.half()
    if save_path is None:  # overwrite src_path
        save_path = src_path
    torch.save(state_dict, save_path)
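
# CLI sketch (added): fire exposes `convert` directly, so an invocation would look
# like the following (script and file names are illustrative):
#   python fp16_convert.py pytorch_model.bin --save_path pytorch_model.fp16.bin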
if __name__ == "__main__":
fire.Fire(convert)
| 528 | 0 |
'''simple docstring'''
def circle_sort(collection: list) -> list:
    """Sort ``collection`` in place with the circle sort algorithm and return it."""
    if len(collection) < 2:
        return collection

    def circle_sort_util(collection: list, low: int, high: int) -> bool:
        swapped = False

        if low == high:
            return swapped

        left = low
        right = high

        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True

            left += 1
            right -= 1

        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True

        mid = low + int((high - low) / 2)
        left_swap = circle_sort_util(collection, low, mid)
        right_swap = circle_sort_util(collection, mid + 1, high)

        return swapped or left_swap or right_swap

    is_not_sorted = True

    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection, 0, len(collection) - 1)

    return collection
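
# Example (added): circle sort works in place and also returns the list.
# >>> circle_sort([5, 3, 1, 4, 2])
# [1, 2, 3, 4, 5]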
if __name__ == "__main__":
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
print(circle_sort(unsorted))
| 47 |
'''simple docstring'''
from random import randint
from tempfile import TemporaryFile
import numpy as np
def _in_place_quick_sort(a, start, end):
    count = 0
    if start < end:
        pivot = randint(start, end)
        temp = a[end]
        a[end] = a[pivot]
        a[pivot] = temp

        p, count = _in_place_partition(a, start, end)
        count += _in_place_quick_sort(a, start, p - 1)
        count += _in_place_quick_sort(a, p + 1, end)
    return count


def _in_place_partition(a, start, end):
    count = 0
    pivot = randint(start, end)
    temp = a[end]
    a[end] = a[pivot]
    a[pivot] = temp
    new_pivot_index = start - 1
    for index in range(start, end):
        count += 1
        if a[index] < a[end]:  # check if current val is less than pivot value
            new_pivot_index = new_pivot_index + 1
            temp = a[new_pivot_index]
            a[new_pivot_index] = a[index]
            a[index] = temp

    temp = a[new_pivot_index + 1]
    a[new_pivot_index + 1] = a[end]
    a[end] = temp
    return new_pivot_index + 1, count
outfile = TemporaryFile()
p = 100  # 100 elements are to be sorted

mu, sigma = 0, 1  # mean and standard deviation
X = np.random.normal(mu, sigma, p)
np.save(outfile, X)
print('The array is')
print(X)

outfile.seek(0)  # using the same array
M = np.load(outfile)
r = len(M) - 1
z = _in_place_quick_sort(M, 0, r)

print(
    'No of Comparisons for 100 elements selected from a standard normal distribution'
    ' is :'
)
print(z)
| 47 | 1 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    '''simple docstring'''

    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"
    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
@require_torch
def A__ ( self ):
UpperCAmelCase_ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
UpperCAmelCase_ = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase_ = tokenizer(lowerCAmelCase , max_length=len(lowerCAmelCase ) , padding=lowerCAmelCase , return_tensors="pt" )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
UpperCAmelCase_ = batch.input_ids.tolist()[0]
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
# Test that special tokens are reset
@require_torch
def A__ ( self ):
UpperCAmelCase_ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase_ = tokenizer(lowerCAmelCase , padding=lowerCAmelCase , return_tensors="pt" )
# check if input_ids are returned and no labels
self.assertIn("input_ids" , lowerCAmelCase )
self.assertIn("attention_mask" , lowerCAmelCase )
self.assertNotIn("labels" , lowerCAmelCase )
self.assertNotIn("decoder_attention_mask" , lowerCAmelCase )
@require_torch
def A__ ( self ):
UpperCAmelCase_ = [
"Summary of the text.",
"Another summary.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase_ = tokenizer(text_target=lowerCAmelCase , max_length=32 , padding="max_length" , return_tensors="pt" )
self.assertEqual(32 , targets["input_ids"].shape[1] )
@require_torch
def A__ ( self ):
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase_ = tokenizer(
["I am a small frog" * 1024, "I am a small frog"] , padding=lowerCAmelCase , truncation=lowerCAmelCase , return_tensors="pt" )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
self.assertEqual(batch.input_ids.shape , (2, 1024) )
@require_torch
def A__ ( self ):
UpperCAmelCase_ = ["A long paragraph for summarization."]
UpperCAmelCase_ = [
"Summary of the text.",
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
UpperCAmelCase_ = tokenizer(lowerCAmelCase , text_target=lowerCAmelCase , return_tensors="pt" )
UpperCAmelCase_ = inputs["input_ids"]
UpperCAmelCase_ = inputs["labels"]
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def A__ ( self ):
pass
def A__ ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
UpperCAmelCase_ = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase_ = self.tokenizer_class.from_pretrained(lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase_ = "A, <mask> AllenNLP sentence."
UpperCAmelCase_ = tokenizer_r.encode_plus(lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_token_type_ids=lowerCAmelCase )
UpperCAmelCase_ = tokenizer_p.encode_plus(lowerCAmelCase , add_special_tokens=lowerCAmelCase , return_token_type_ids=lowerCAmelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
UpperCAmelCase_ = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
UpperCAmelCase_ = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
lowerCAmelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
lowerCAmelCase , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
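# Hypothetical usage sketch (added, not part of the test suite): round-tripping
# a sentence through the pretrained MVP tokenizer the tests above exercise.
# Expected ids taken from the batch-encoding test earlier in this file.
#
#   from transformers import MvpTokenizer
#
#   tok = MvpTokenizer.from_pretrained("RUCAIBox/mvp")
#   batch = tok(["A long paragraph for summarization."], return_tensors="pt")
#   print(batch["input_ids"][0].tolist())  # [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
#   print(tok.decode(batch["input_ids"][0], skip_special_tokens=True))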
| 579 |
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, PerceiverTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = PerceiverTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = PerceiverTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def perceiver_tokenizer(self):
        return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver")

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
def A__ ( self , lowerCAmelCase , lowerCAmelCase=False , lowerCAmelCase=20 , lowerCAmelCase=5 ):
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for Perceiver because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
UpperCAmelCase_ = []
for i in range(len(lowerCAmelCase ) ):
try:
UpperCAmelCase_ = tokenizer.decode([i] , clean_up_tokenization_spaces=lowerCAmelCase )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
UpperCAmelCase_ = list(filter(lambda lowerCAmelCase : re.match(r"^[ a-zA-Z]+$" , t[1] ) , lowerCAmelCase ) )
UpperCAmelCase_ = list(filter(lambda lowerCAmelCase : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=lowerCAmelCase ) , lowerCAmelCase ) )
if max_length is not None and len(lowerCAmelCase ) > max_length:
UpperCAmelCase_ = toks[:max_length]
if min_length is not None and len(lowerCAmelCase ) < min_length and len(lowerCAmelCase ) > 0:
while len(lowerCAmelCase ) < min_length:
UpperCAmelCase_ = toks + toks
# toks_str = [t[1] for t in toks]
UpperCAmelCase_ = [t[0] for t in toks]
# Ensure consistency
UpperCAmelCase_ = tokenizer.decode(lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase )
if " " not in output_txt and len(lowerCAmelCase ) > 1:
UpperCAmelCase_ = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=lowerCAmelCase )
+ " "
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=lowerCAmelCase )
)
if with_prefix_space:
UpperCAmelCase_ = " " + output_txt
UpperCAmelCase_ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
return output_txt, output_ids
def A__ ( self ):
UpperCAmelCase_ = self.perceiver_tokenizer
UpperCAmelCase_ = "Unicode €."
UpperCAmelCase_ = tokenizer(lowerCAmelCase )
UpperCAmelCase_ = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]
self.assertEqual(encoded["input_ids"] , lowerCAmelCase )
# decoding
UpperCAmelCase_ = tokenizer.decode(lowerCAmelCase )
self.assertEqual(lowerCAmelCase , "[CLS]Unicode €.[SEP]" )
UpperCAmelCase_ = tokenizer("e è é ê ë" )
UpperCAmelCase_ = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5]
self.assertEqual(encoded["input_ids"] , lowerCAmelCase )
# decoding
UpperCAmelCase_ = tokenizer.decode(lowerCAmelCase )
self.assertEqual(lowerCAmelCase , "[CLS]e è é ê ë[SEP]" )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë" ) ) , "[CLS]e è é ê ë[SEP]" )
def A__ ( self ):
UpperCAmelCase_ = self.perceiver_tokenizer
UpperCAmelCase_ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
# fmt: off
UpperCAmelCase_ = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0]
# fmt: on
UpperCAmelCase_ = tokenizer(lowerCAmelCase , padding=lowerCAmelCase , return_tensors=lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
if FRAMEWORK != "jax":
UpperCAmelCase_ = list(batch.input_ids.numpy()[0] )
else:
UpperCAmelCase_ = list(batch.input_ids.tolist()[0] )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
self.assertEqual((2, 38) , batch.input_ids.shape )
self.assertEqual((2, 38) , batch.attention_mask.shape )
def A__ ( self ):
UpperCAmelCase_ = self.perceiver_tokenizer
UpperCAmelCase_ = ["A long paragraph for summarization.", "Another paragraph for summarization."]
UpperCAmelCase_ = tokenizer(lowerCAmelCase , padding=lowerCAmelCase , return_tensors=lowerCAmelCase )
# check if input_ids are returned and no decoder_input_ids
self.assertIn("input_ids" , lowerCAmelCase )
self.assertIn("attention_mask" , lowerCAmelCase )
self.assertNotIn("decoder_input_ids" , lowerCAmelCase )
self.assertNotIn("decoder_attention_mask" , lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = self.perceiver_tokenizer
UpperCAmelCase_ = [
"Summary of the text.",
"Another summary.",
]
UpperCAmelCase_ = tokenizer(
text_target=lowerCAmelCase , max_length=32 , padding="max_length" , truncation=lowerCAmelCase , return_tensors=lowerCAmelCase )
self.assertEqual(32 , targets["input_ids"].shape[1] )
def A__ ( self ):
# safety check on max_len default value so we are sure the test works
UpperCAmelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
UpperCAmelCase_ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = " He is very happy, UNwant\u00E9d,running"
UpperCAmelCase_ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
tokenizer.save_pretrained(lowerCAmelCase )
UpperCAmelCase_ = tokenizer.__class__.from_pretrained(lowerCAmelCase )
UpperCAmelCase_ = after_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
shutil.rmtree(lowerCAmelCase )
UpperCAmelCase_ = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
# Isolate this from the other tests because we save additional tokens/etc
UpperCAmelCase_ = tempfile.mkdtemp()
UpperCAmelCase_ = " He is very happy, UNwant\u00E9d,running"
tokenizer.add_tokens(["bim", "bambam"] )
UpperCAmelCase_ = tokenizer.additional_special_tokens
additional_special_tokens.append("new_additional_special_token" )
tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens} )
UpperCAmelCase_ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
tokenizer.save_pretrained(lowerCAmelCase )
UpperCAmelCase_ = tokenizer.__class__.from_pretrained(lowerCAmelCase )
UpperCAmelCase_ = after_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
self.assertIn("new_additional_special_token" , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
UpperCAmelCase_ = tokenizer.__class__.from_pretrained(lowerCAmelCase , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(lowerCAmelCase )
with open(os.path.join(lowerCAmelCase , "special_tokens_map.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase_ = json.load(lowerCAmelCase )
with open(os.path.join(lowerCAmelCase , "tokenizer_config.json" ) , encoding="utf-8" ) as json_file:
UpperCAmelCase_ = json.load(lowerCAmelCase )
UpperCAmelCase_ = [f'''<extra_id_{i}>''' for i in range(125 )]
UpperCAmelCase_ = added_tokens_extra_ids + [
"an_additional_special_token"
]
UpperCAmelCase_ = added_tokens_extra_ids + [
"an_additional_special_token"
]
with open(os.path.join(lowerCAmelCase , "special_tokens_map.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(lowerCAmelCase , lowerCAmelCase )
with open(os.path.join(lowerCAmelCase , "tokenizer_config.json" ) , "w" , encoding="utf-8" ) as outfile:
json.dump(lowerCAmelCase , lowerCAmelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
UpperCAmelCase_ = tokenizer_class.from_pretrained(
lowerCAmelCase , )
self.assertIn(
"an_additional_special_token" , tokenizer_without_change_in_init.additional_special_tokens )
self.assertEqual(
["an_additional_special_token"] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
UpperCAmelCase_ = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token" , lstrip=lowerCAmelCase )]
UpperCAmelCase_ = tokenizer_class.from_pretrained(
lowerCAmelCase , additional_special_tokens=lowerCAmelCase , )
self.assertIn("a_new_additional_special_token" , tokenizer.additional_special_tokens )
self.assertEqual(
["a_new_additional_special_token"] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"] ) ) , )
def A__ ( self ):
UpperCAmelCase_ = self.perceiver_tokenizer
self.assertEqual(tokenizer.decode([178] ) , "�" )
def A__ ( self ):
pass
def A__ ( self ):
pass
def A__ ( self ):
pass
def A__ ( self ):
pass
def A__ ( self ):
# The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character
# strings and special added tokens as tokens
UpperCAmelCase_ = self.get_tokenizers(fast=lowerCAmelCase , do_lower_case=lowerCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
UpperCAmelCase_ = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"]
UpperCAmelCase_ = tokenizer.convert_tokens_to_string(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
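# A standalone sketch (added): the expected ids in the tests above are
# consistent with a byte-level scheme where each UTF-8 byte is shifted past
# the 6 special tokens, bracketed by [CLS] = 4 and [SEP] = 5. The offset is
# inferred from the expected values in this file, not from the implementation.
def _perceiver_byte_ids(text):
    return [4] + [b + 6 for b in text.encode("utf-8")] + [5]


assert _perceiver_byte_ids("Unicode €.") == [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5]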
| 579 | 1 |
from __future__ import annotations
from typing import Any
class Matrix:
    """Limited dense-matrix operations plus the Sherman-Morrison rank-one inverse update."""

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[Any]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indices(self, loc: tuple[int, int]) -> bool:
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another: Matrix) -> Matrix:
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)

    def __mul__(self, another: float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)

    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vectors
        assert u.column == v.column == 1

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # it's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
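# A quick numerical check (added sketch): with A = I (so A^(-1) = I), u = e1
# and v = e2, Sherman-Morrison gives (A + u v^T)^(-1) = I - u v^T, because
# v^T u = 0 and (u v^T)^2 = 0. We verify the single off-diagonal correction.
def _sherman_morrison_sanity() -> None:
    ainv = Matrix(3, 3)
    for i in range(3):
        ainv[i, i] = 1
    u, v = Matrix(3, 1), Matrix(3, 1)
    u[0, 0], v[1, 0] = 1, 1  # u = e1, v = e2
    updated = ainv.sherman_morrison(u, v)
    assert updated[0, 1] == -1  # the -u v^T term


_sherman_morrison_sanity()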
# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman-Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()

| 704 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
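# Illustrative check (added sketch) of how the "*" templates above are filled:
# the fairseq layer index is recovered from the original checkpoint key and
# substituted into the mapped name, mirroring recursively_load_weights below.
def _demo_star_expansion():
    name = "w2v_encoder.w2v_model.encoder.layers.3.ffn1.layer_norm.weight"
    key, mapped_key = "ffn1.layer_norm", "encoder.layers.*.ffn1_layer_norm"
    layer_index = name.split(key)[0].split(".")[-2]
    assert mapped_key.replace("*", layer_index) == "encoder.layers.3.ffn1_layer_norm"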
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
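# Illustrative parse (added sketch) of the fairseq feature-extractor key layout
# that load_conv_layer relies on: "conv_layers.<layer_id>.<type_id>....", where
# type 0 is the convolution itself and type 2 the (layer/group) norm.
def _demo_conv_key_parse():
    full_name = "feature_extractor.conv_layers.2.0.weight"
    items = full_name.split("conv_layers.")[-1].split(".")  # ["2", "0", "weight"]
    assert (int(items[0]), int(items[1])) == (2, 0)  # (layer_id, type_id)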
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
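# Example invocation (added; the script name follows the usual transformers
# conversion-script convention and all paths are placeholders):
#
#   python convert_wav2vec2_conformer_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path ./wav2vec2-conformer \
#       --dict_path /path/to/dict.ltr.txt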
def valid_coloring(neighbours, colored_vertices, color):
    # Does any neighbour fail the constraint?
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours)
    )


def util_color(graph, max_colors, colored_vertices, index):
    # Base Case
    if index == len(graph):
        return True

    # Recursive Step
    for i in range(max_colors):
        if valid_coloring(graph[index], colored_vertices, i):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph, max_colors, colored_vertices, index + 1):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph, max_colors):
    colored_vertices = [-1] * len(graph)
    if util_color(graph, max_colors, colored_vertices, 0):
        return colored_vertices
    return []
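# Example usage sketch (added): 3-coloring the complete graph K3, given as an
# adjacency matrix. Two colors are provably not enough for a triangle.
if __name__ == "__main__":
    k3 = [
        [0, 1, 1],
        [1, 0, 1],
        [1, 1, 0],
    ]
    assert color(k3, 3) == [0, 1, 2]
    assert color(k3, 2) == []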
| 114 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "OwlViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ ) -> str:
'''simple docstring'''
A_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , UpperCamelCase__ , )
A_ = kwargs.pop("""feature_extractor""" )
A_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(UpperCamelCase__ , UpperCamelCase__ )
def __call__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__="max_length" , UpperCamelCase__="np" , **UpperCamelCase__ ) -> int:
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
"""You have to specify at least one text or query image or image. All three cannot be none.""" )
if text is not None:
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) or (isinstance(UpperCamelCase__ , UpperCamelCase__ ) and not isinstance(text[0] , UpperCamelCase__ )):
A_ = [self.tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )]
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ) and isinstance(text[0] , UpperCamelCase__ ):
A_ = []
# Maximum number of queries across batch
A_ = max([len(UpperCamelCase__ ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(UpperCamelCase__ ) != max_num_queries:
A_ = t + [""" """] * (max_num_queries - len(UpperCamelCase__ ))
A_ = self.tokenizer(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
encodings.append(UpperCamelCase__ )
else:
raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
if return_tensors == "np":
A_ = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
A_ = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
A_ = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
A_ = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
A_ = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
A_ = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
A_ = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
A_ = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
else:
raise ValueError("""Target return tensor type could not be returned""" )
A_ = BatchEncoding()
A_ = input_ids
A_ = attention_mask
if query_images is not None:
A_ = BatchEncoding()
A_ = self.image_processor(
UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ ).pixel_values
A_ = query_pixel_values
if images is not None:
A_ = self.image_processor(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ )
if text is not None and images is not None:
A_ = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
A_ = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCamelCase__ ) , tensor_type=UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> int:
'''simple docstring'''
return self.image_processor.post_process(*UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
return self.image_processor.post_process_object_detection(*UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )
def snake_case_ ( self , *UpperCamelCase__ , **UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )
@property
def snake_case_ ( self ) -> Optional[int]:
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , UpperCamelCase__ , )
return self.image_processor_class
@property
def snake_case_ ( self ) -> List[str]:
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , UpperCamelCase__ , )
return self.image_processor
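# Illustrative sketch (added): nested text queries are padded with " " up to
# the longest inner list before tokenization, mirroring the __call__ logic above.
def _demo_query_padding():
    text = [["a cat", "a dog"], ["a bird"]]
    max_num_queries = max(len(t) for t in text)
    padded = [t + [" "] * (max_num_queries - len(t)) for t in text]
    assert padded == [["a cat", "a dog"], ["a bird", " "]]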
| 288 | 0 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTMAEModelTester:
def __init__( self , snake_case_ , snake_case_=13 , snake_case_=30 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=32 , snake_case_=2 , snake_case_=4 , snake_case_=37 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=10 , snake_case_=0.02 , snake_case_=3 , snake_case_=0.6 , snake_case_=None , ) -> str:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = mask_ratio
_UpperCAmelCase = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_UpperCAmelCase = (image_size // patch_size) ** 2
_UpperCAmelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def __A ( self ) -> Union[str, Any]:
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def __A ( self ) -> Any:
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case_ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def __A ( self , snake_case_ , snake_case_ , snake_case_ ) -> Dict:
_UpperCAmelCase = TFViTMAEModel(config=snake_case_ )
_UpperCAmelCase = model(snake_case_ , training=snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __A ( self , snake_case_ , snake_case_ , snake_case_ ) -> List[str]:
_UpperCAmelCase = TFViTMAEForPreTraining(snake_case_ )
_UpperCAmelCase = model(snake_case_ , training=snake_case_ )
# expected sequence length = num_patches
_UpperCAmelCase = (self.image_size // self.patch_size) ** 2
_UpperCAmelCase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_UpperCAmelCase = 1
_UpperCAmelCase = TFViTMAEForPreTraining(snake_case_ )
_UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase = model(snake_case_ , training=snake_case_ )
_UpperCAmelCase = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def __A ( self ) -> Optional[Any]:
_UpperCAmelCase = self.prepare_config_and_inputs()
((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) = config_and_inputs
_UpperCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
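# Quick arithmetic check (added sketch): with the tester defaults above
# (image_size=30, patch_size=2, mask_ratio=0.6), num_patches = 15 * 15 = 225
# and the expected sequence length is ceil((1 - 0.6) * (225 + 1)) = 91 tokens.
assert math.ceil((1 - 0.6) * ((30 // 2) ** 2 + 1)) == 91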
@require_tf
class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
    pipeline_model_mapping = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {}

    test_pruning = False
    test_onnx = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = TFViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
def __A ( self ) -> Tuple:
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
def __A ( self ) -> Optional[int]:
pass
def __A ( self ) -> str:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case_ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , tf.keras.layers.Layer ) )
def __A ( self ) -> int:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case_ )
_UpperCAmelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case_ )
def __A ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def __A ( self ) -> str:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case_ )
def __A ( self ) -> List[str]:
# make the mask reproducible
np.random.seed(2 )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = int((config.image_size // config.patch_size) ** 2 )
_UpperCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case_ )
_UpperCAmelCase = self._prepare_for_class(snake_case_ , snake_case_ )
_UpperCAmelCase = model(snake_case_ , noise=snake_case_ )
_UpperCAmelCase = copy.deepcopy(self._prepare_for_class(snake_case_ , snake_case_ ) )
_UpperCAmelCase = model(**snake_case_ , noise=snake_case_ )
_UpperCAmelCase = outputs_dict[0].numpy()
_UpperCAmelCase = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def __A ( self ) -> List[Any]:
# make the mask reproducible
np.random.seed(2 )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = int((config.image_size // config.patch_size) ** 2 )
_UpperCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(snake_case_ ):
_UpperCAmelCase = {}
for k, v in inputs_dict.items():
if tf.is_tensor(snake_case_ ):
_UpperCAmelCase = v.numpy()
else:
_UpperCAmelCase = np.array(snake_case_ )
return inputs_np_dict
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case_ )
_UpperCAmelCase = self._prepare_for_class(snake_case_ , snake_case_ )
_UpperCAmelCase = prepare_numpy_arrays(snake_case_ )
_UpperCAmelCase = model(snake_case_ , noise=snake_case_ )
_UpperCAmelCase = model(**snake_case_ , noise=snake_case_ )
self.assert_outputs_same(snake_case_ , snake_case_ )
def __A ( self , snake_case_ , snake_case_ , snake_case_ ) -> Dict:
# make masks reproducible
np.random.seed(2 )
_UpperCAmelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
_UpperCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_UpperCAmelCase = tf.constant(snake_case_ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_UpperCAmelCase = tf_noise
super().check_pt_tf_models(snake_case_ , snake_case_ , snake_case_ )
def __A ( self ) -> Optional[int]:
# make mask reproducible
np.random.seed(2 )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(snake_case_ )
if module_member_name.endswith("MainLayer" )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len("MainLayer" )] == model_class.__name__[: -len("Model" )]
for module_member in (getattr(snake_case_ , snake_case_ ),)
if isinstance(snake_case_ , snake_case_ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(snake_case_ , "_keras_serializable" , snake_case_ )
}
_UpperCAmelCase = int((config.image_size // config.patch_size) ** 2 )
_UpperCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_UpperCAmelCase = tf.convert_to_tensor(snake_case_ )
inputs_dict.update({"noise": noise} )
for main_layer_class in tf_main_layer_classes:
_UpperCAmelCase = main_layer_class(snake_case_ )
_UpperCAmelCase = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
_UpperCAmelCase = tf.keras.Model(snake_case_ , outputs=main_layer(snake_case_ ) )
_UpperCAmelCase = model(snake_case_ )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = os.path.join(snake_case_ , "keras_model.h5" )
model.save(snake_case_ )
_UpperCAmelCase = tf.keras.models.load_model(
snake_case_ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(snake_case_ , tf.keras.Model )
_UpperCAmelCase = model(snake_case_ )
self.assert_outputs_same(snake_case_ , snake_case_ )
@slow
def __A ( self ) -> List[str]:
# make mask reproducible
np.random.seed(2 )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = int((config.image_size // config.patch_size) ** 2 )
_UpperCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case_ )
_UpperCAmelCase = self._prepare_for_class(snake_case_ , snake_case_ )
_UpperCAmelCase = model(snake_case_ , noise=snake_case_ )
if model_class.__name__ == "TFViTMAEModel":
_UpperCAmelCase = outputs.last_hidden_state.numpy()
_UpperCAmelCase = 0
else:
_UpperCAmelCase = outputs.logits.numpy()
_UpperCAmelCase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case_ , saved_model=snake_case_ )
_UpperCAmelCase = model_class.from_pretrained(snake_case_ )
_UpperCAmelCase = model(snake_case_ , noise=snake_case_ )
if model_class.__name__ == "TFViTMAEModel":
_UpperCAmelCase = after_outputs["last_hidden_state"].numpy()
_UpperCAmelCase = 0
else:
_UpperCAmelCase = after_outputs["logits"].numpy()
_UpperCAmelCase = 0
_UpperCAmelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(snake_case_ , 1e-5 )
def __A ( self ) -> Dict:
# make mask reproducible
np.random.seed(2 )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = int((config.image_size // config.patch_size) ** 2 )
_UpperCAmelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case_ )
_UpperCAmelCase = self._prepare_for_class(snake_case_ , snake_case_ )
_UpperCAmelCase = model(snake_case_ , noise=snake_case_ )
_UpperCAmelCase = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(snake_case_ )
_UpperCAmelCase = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
_UpperCAmelCase = model_class.from_config(model.config )
_UpperCAmelCase = new_model(snake_case_ ) # Build model
new_model.set_weights(model.get_weights() )
_UpperCAmelCase = new_model(snake_case_ , noise=snake_case_ )
self.assert_outputs_same(snake_case_ , snake_case_ )
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def __A ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def __A ( self ) -> Optional[int]:
pass
@slow
def __A ( self ) -> Optional[Any]:
_UpperCAmelCase = TFViTMAEModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(snake_case_ )
def A__ ( ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class TFViTMAEModelIntegrationTest(unittest.TestCase):
@cached_property
def __A ( self ) -> int:
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
def __A ( self ) -> Tuple:
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
_UpperCAmelCase = TFViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base" )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=snake_case_ , return_tensors="tf" )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_UpperCAmelCase = ViTMAEConfig()
_UpperCAmelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_UpperCAmelCase = np.random.uniform(size=(1, num_patches) )
# forward pass
_UpperCAmelCase = model(**snake_case_ , noise=snake_case_ )
# verify the logits
_UpperCAmelCase = tf.convert_to_tensor([1, 196, 768] )
self.assertEqual(outputs.logits.shape , snake_case_ )
_UpperCAmelCase = tf.convert_to_tensor(
[[-0.05_48, -1.70_23, -0.93_25], [0.37_21, -0.56_70, -0.22_33], [0.82_35, -1.38_78, -0.35_24]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , snake_case_ , atol=1e-4 )
| 579 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {
'''studio-ousia/luke-base''': '''https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json''',
'''studio-ousia/luke-large''': '''https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json''',
}
class LukeConfig(PretrainedConfig):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
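# Minimal usage sketch (added): overriding one field and reading values back.
if __name__ == "__main__":
    config = LukeConfig(entity_vocab_size=10_000)
    print(config.model_type)         # luke
    print(config.entity_vocab_size)  # 10000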
| 579 | 1 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()
        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
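# Hypothetical usage sketch (added; the checkpoint id and the public export of
# the pipeline class are assumptions, not confirmed by this file):
#
#   import scipy.io.wavfile
#
#   pipe = DanceDiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#   output = pipe(audio_length_in_s=4.0, num_inference_steps=100)
#   audio = output.audios[0]  # numpy array of shape (channels, samples)
#   scipy.io.wavfile.write("out.wav", pipe.unet.config.sample_rate, audio.T)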
| 13 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
# TODO Update this
A__ : Tuple = {
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"

    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    """simple docstring"""
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None
    def __post_init__( self ):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk , dict ):
            self.trunk = TrunkConfig(**self.trunk )
    def to_dict( self ):
        output = asdict(self )
        output['trunk'] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    """simple docstring"""
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None
    def __post_init__( self ):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module , dict ):
            self.structure_module = StructureModuleConfig(**self.structure_module )
        if self.max_recycles <= 0:
            raise ValueError(f'`max_recycles` should be positive, got {self.max_recycles}.' )
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                '`sequence_state_dim` should be a round multiple of `sequence_head_width`, got'
                f' {self.sequence_state_dim} and {self.sequence_head_width}.' )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                '`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got'
                f' {self.pairwise_state_dim} and {self.pairwise_head_width}.' )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                '`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got'
                f' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.' )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                '`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got'
                f' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.' )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.' )
        if self.dropout >= 0.4:
            raise ValueError(f'`dropout` should not be greater than 0.4, got {self.dropout}.' )
    def to_dict( self ):
        output = asdict(self )
        output['structure_module'] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    """simple docstring"""
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5
    def to_dict( self ):
        return asdict(self )
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
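# --- Usage sketch (illustrative; the classes above mirror transformers' EsmConfig) ---
# from transformers import EsmConfig
# cfg = EsmConfig(vocab_size=33, hidden_size=64, num_hidden_layers=2, num_attention_heads=4)
# assert cfg.to_dict()["hidden_size"] == 64
# cfg_fold = EsmConfig(vocab_size=33, is_folding_model=True)  # fills in EsmFoldConfig defaults
# assert isinstance(cfg_fold.to_dict()["esmfold_config"], dict)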
| 13 | 1 |
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
logger = logging.get_logger(__name__)
class DeiTFeatureExtractor(DeiTImageProcessor ):
    """simple docstring"""
    def __init__( self , *args , **kwargs ):
        warnings.warn(
            'The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use DeiTImageProcessor instead.' , FutureWarning , )
        super().__init__(*args , **kwargs )
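# --- The same deprecation pattern in isolation (illustrative; the class names
# below are hypothetical stand-ins, not transformers classes) ---
class _NewProcessor:
    """Stand-in for the maintained class."""
    def __init__(self, size=224):
        self.size = size
class _OldProcessor(_NewProcessor):
    """Deprecated alias: keeps old imports working while steering users away."""
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "_OldProcessor is deprecated; use _NewProcessor instead." , FutureWarning )
        super().__init__(*args, **kwargs)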
| 314 |
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq( number ):
    """simple docstring"""
    sq = int(number**0.5 )
    return number == sq * sq
def add_three( x_num , x_den , y_num , y_den , z_num , z_den ):
    """simple docstring"""
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top , bottom )
    top //= hcf
    bottom //= hcf
    return top, bottom
def solution( order = 35 ):
    """simple docstring"""
    unique_s = set()
    total = Fraction(0 )
    for x_num in range(1 , order + 1 ):
        for x_den in range(x_num + 1 , order + 1 ):
            for y_num in range(1 , order + 1 ):
                for y_den in range(y_num + 1 , order + 1 ):
                    # n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
                    # n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num , z_den )
                    z_num //= hcf
                    z_den //= hcf
                    if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den )
                        unique_s.add(fraction_sum )
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num ) and is_sq(z_den ):
                        z_num = int(sqrt(z_num ) )
                        z_den = int(sqrt(z_den ) )
                        hcf = gcd(z_num , z_den )
                        z_num //= hcf
                        z_den //= hcf
                        if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den )
                            unique_s.add(fraction_sum )
    for num, den in unique_s:
        total += Fraction(num , den )
    return total.denominator + total.numerator
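# --- Mini check (illustrative addition) ---
# add_three() sums three reduced fractions and reduces the result:
# 1/2 + 1/3 + 1/6 == 1/1.
assert add_three(1 , 2 , 1 , 3 , 1 , 6 ) == (1, 1)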
if __name__ == "__main__":
print(F"""{solution() = }""")
| 314 | 1 |
import numpy
# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
def _error(example_no , data_set="train" ):
    """simple docstring"""
    return calculate_hypothesis_value(example_no , data_set ) - output(
        example_no , data_set )
def _hypothesis_value(data_input_tuple ):
    """simple docstring"""
    hyp_val = 0
    for i in range(len(data_input_tuple ) - 1 ):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val
def output(example_no , data_set ):
    """simple docstring"""
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None
def calculate_hypothesis_value(example_no , data_set ):
    """simple docstring"""
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0] )
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0] )
    return None
def summation_of_cost_derivative(index , end=m ):
    """simple docstring"""
    summation_value = 0
    for i in range(end ):
        if index == -1:
            summation_value += _error(i )
        else:
            summation_value += _error(i ) * train_data[i][0][index]
    return summation_value
def get_cost_derivative(index ):
    """simple docstring"""
    cost_derivative_value = summation_of_cost_derivative(index , m ) / m
    return cost_derivative_value
def run_gradient_descent():
    """simple docstring"""
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0 , len(parameter_vector ) ):
            cost_derivative = get_cost_derivative(i - 1 )
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector , temp_parameter_vector , atol=absolute_error_limit , rtol=relative_error_limit , ):
            break
        parameter_vector = temp_parameter_vector
    print(("""Number of iterations:""", j) )
def test_gradient_descent():
    """simple docstring"""
    for i in range(len(test_data ) ):
        print(("""Actual output value:""", output(i , """test""" )) )
        print(("""Hypothesis output:""", calculate_hypothesis_value(i , """test""" )) )
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
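# --- Vectorised sketch (illustrative addition) ---
# The loop above is batch gradient descent for h(x) = theta0 + theta . x with a
# mean-squared-error cost. The same update written with numpy arrays; the toy
# learning rate and iteration cap here are arbitrary choices for the demo.
import numpy as np
_X = np.array([[5, 2, 3], [6, 5, 9], [11, 12, 13], [1, 1, 1]], dtype=float)
_y = np.array([15.0, 25.0, 41.0, 8.0])
_Xb = np.hstack([np.ones((len(_X), 1)), _X])       # prepend a bias column
_theta = np.zeros(_Xb.shape[1])
for _ in range(10_000):
    _grad = _Xb.T @ (_Xb @ _theta - _y) / len(_y)  # gradient of the MSE/2 cost
    _new_theta = _theta - 0.001 * _grad
    if np.allclose(_theta, _new_theta, atol=2e-6, rtol=0):
        break
    _theta = _new_theta
print(("Vectorised parameters:", _theta))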
| 612 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class RandomIterableDataset(IterableDataset ):
    def __init__( self , p_stop=0.01 , max_length=1000 ):
        '''simple docstring'''
        self.p_stop = p_stop
        self.max_length = max_length
    def __iter__( self ):
        '''simple docstring'''
        count = 0
        stop = False
        while not stop and count < self.max_length:
            yield count
            count += 1
            stop = random.random() < self.p_stop
class lowerCAmelCase__ ( unittest.TestCase ):
def A_ ( self , a , a , a=False , a=True ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = [
BatchSamplerShard(a , 2 , a , split_batches=a , even_batches=a )
for i in range(2 )
]
_UpperCamelCase = [list(a ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(a ) for shard in batch_sampler_shards] , [len(a ) for e in expected] )
self.assertListEqual(a , a )
def A_ ( self ) -> Any:
'''simple docstring'''
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a , a )
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=a )
# Expected shouldn't change
self.check_batch_sampler_shards(a , a )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(a , a )
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(a , a )
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_UpperCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(a , a )
_UpperCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a )
# Check the shards when the dataset is very small.
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(a , a )
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [[], []]
self.check_batch_sampler_shards(a , a )
def A_ ( self ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a , a , split_batches=a )
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=a )
# Expected shouldn't change
self.check_batch_sampler_shards(a , a , split_batches=a )
# Check the shards when the dataset is not a round multiple of batch size.
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(a , a , split_batches=a )
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a , a , split_batches=a )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(a , a , split_batches=a )
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a , a , split_batches=a )
# Check the shards when the dataset is very small.
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(a , a , split_batches=a )
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [[], []]
self.check_batch_sampler_shards(a , a , split_batches=a )
def A_ ( self ) -> str:
'''simple docstring'''
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(a , a , even_batches=a )
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=3 , drop_last=a )
# Expected shouldn't change
self.check_batch_sampler_shards(a , a , even_batches=a )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a , even_batches=a )
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a , even_batches=a )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(a , a , even_batches=a )
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a , even_batches=a )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
_UpperCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a , even_batches=a )
_UpperCamelCase = BatchSampler(range(20 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(a , a , even_batches=a )
# Check the shards when the dataset is very small.
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [[[0, 1]], []]
self.check_batch_sampler_shards(a , a , even_batches=a )
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=3 , drop_last=a )
_UpperCamelCase = [[], []]
self.check_batch_sampler_shards(a , a , even_batches=a )
def A_ ( self ) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
_UpperCamelCase = BatchSampler(range(24 ) , batch_size=4 , drop_last=a )
# Expected shouldn't change
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
# Check the shards when the dataset is not a round multiple of batch size.
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
_UpperCamelCase = BatchSampler(range(22 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
_UpperCamelCase = BatchSampler(range(21 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
# Check the shards when the dataset is very small.
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [[[0, 1]], []]
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
_UpperCamelCase = BatchSampler(range(2 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = [[], []]
self.check_batch_sampler_shards(a , a , split_batches=a , even_batches=a )
def A_ ( self ) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
_UpperCamelCase = [BatchSamplerShard(a , 2 , a , even_batches=a ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def A_ ( self , a , a , a , a=False , a=2 , a=False ) -> Any:
'''simple docstring'''
random.seed(a )
_UpperCamelCase = list(a )
_UpperCamelCase = [
IterableDatasetShard(
a , batch_size=a , drop_last=a , num_processes=a , process_index=a , split_batches=a , )
for i in range(a )
]
_UpperCamelCase = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(a )
iterable_dataset_lists.append(list(a ) )
_UpperCamelCase = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
_UpperCamelCase = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(a ) , len(a ) )
self.assertTrue(len(a ) % shard_batch_size == 0 )
_UpperCamelCase = []
for idx in range(0 , len(a ) , a ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(a ) < len(a ):
reference += reference
self.assertListEqual(a , reference[: len(a )] )
def A_ ( self ) -> int:
'''simple docstring'''
_UpperCamelCase = 42
_UpperCamelCase = RandomIterableDataset()
self.check_iterable_dataset_shards(a , a , batch_size=4 , drop_last=a , split_batches=a )
self.check_iterable_dataset_shards(a , a , batch_size=4 , drop_last=a , split_batches=a )
self.check_iterable_dataset_shards(a , a , batch_size=4 , drop_last=a , split_batches=a )
self.check_iterable_dataset_shards(a , a , batch_size=4 , drop_last=a , split_batches=a )
# Edge case with a very small dataset
_UpperCamelCase = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(a , a , batch_size=4 , drop_last=a , split_batches=a )
self.check_iterable_dataset_shards(a , a , batch_size=4 , drop_last=a , split_batches=a )
self.check_iterable_dataset_shards(a , a , batch_size=4 , drop_last=a , split_batches=a )
self.check_iterable_dataset_shards(a , a , batch_size=4 , drop_last=a , split_batches=a )
def A_ ( self ) -> Any:
'''simple docstring'''
_UpperCamelCase = BatchSampler(range(16 ) , batch_size=4 , drop_last=a )
_UpperCamelCase = SkipBatchSampler(a , 2 )
self.assertListEqual(list(a ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def A_ ( self ) -> Dict:
'''simple docstring'''
_UpperCamelCase = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def A_ ( self ) -> Tuple:
'''simple docstring'''
_UpperCamelCase = DataLoader(list(range(16 ) ) , batch_size=4 )
_UpperCamelCase = skip_first_batches(a , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def A_ ( self ) -> List[str]:
'''simple docstring'''
_UpperCamelCase = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(a ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(a ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def A_ ( self ) -> Optional[Any]:
'''simple docstring'''
Accelerator()
_UpperCamelCase = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(a ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(a ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
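# --- Sharding at a glance (illustrative; mirrors the expectations asserted above) ---
# from torch.utils.data import BatchSampler
# from accelerate.data_loader import BatchSamplerShard
# sampler = BatchSampler(range(8), batch_size=3, drop_last=False)
# shard0 = BatchSamplerShard(sampler, 2, 0)   # num_processes=2, process_index=0
# shard1 = BatchSamplerShard(sampler, 2, 1)
# With even_batches=True the incomplete last batch is completed by wrapping
# around to the start of the dataset, so this should yield:
# list(shard0)  # [[0, 1, 2], [6, 7, 0]]
# list(shard1)  # [[3, 4, 5], [1, 2, 3]]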
| 612 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.31.0''')
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
'''simple docstring'''
_SCREAMING_SNAKE_CASE : Optional[int] = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
_SCREAMING_SNAKE_CASE : int = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
_SCREAMING_SNAKE_CASE : Any = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
_SCREAMING_SNAKE_CASE : List[Any] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
_SCREAMING_SNAKE_CASE : List[Any] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
_SCREAMING_SNAKE_CASE : int = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
_SCREAMING_SNAKE_CASE : Any = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
@dataclass
class DataTrainingArguments:
'''simple docstring'''
_SCREAMING_SNAKE_CASE : Tuple = field(default=SCREAMING_SNAKE_CASE_ , metadata={"help": "The input training data file (a text file)."} )
_SCREAMING_SNAKE_CASE : str = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
_SCREAMING_SNAKE_CASE : Tuple = field(
default=SCREAMING_SNAKE_CASE_ , metadata={"help": "The number of processes to use for the preprocessing."} , )
_SCREAMING_SNAKE_CASE : Any = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
_SCREAMING_SNAKE_CASE : Union[str, Any] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
} , )
_SCREAMING_SNAKE_CASE : Optional[Any] = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
_SCREAMING_SNAKE_CASE : Dict = field(
default=SCREAMING_SNAKE_CASE_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def lowerCAmelCase__ ( self ):
if self.train_file is not None:
a =self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
a =self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class DataCollatorForMultipleChoice:
    '''simple docstring'''
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    def __call__( self , features ):
        label_name = 'label' if 'label' in features[0].keys() else 'labels'
        labels = [feature.pop(label_name ) for feature in features]
        batch_size = len(features )
        num_choices = len(features[0]['input_ids'] )
        flattened_features = [
            [{k: v[i] for k, v in feature.items()} for i in range(num_choices )] for feature in features
        ]
        flattened_features = list(chain(*flattened_features ) )
        batch = self.tokenizer.pad(
            flattened_features , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='pt' , )
        # Un-flatten
        batch = {k: v.view(batch_size , num_choices , -1 ) for k, v in batch.items()}
        # Add back labels
        batch['labels'] = torch.tensor(labels , dtype=torch.int64 )
        return batch
def main():
a =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
a =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
a =parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_swag""" , __UpperCamelCase , __UpperCamelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
a =training_args.get_process_log_level()
logger.setLevel(__UpperCamelCase )
datasets.utils.logging.set_verbosity(__UpperCamelCase )
transformers.utils.logging.set_verbosity(__UpperCamelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fp16}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
a =None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
a =get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
a ={}
if data_args.train_file is not None:
a =data_args.train_file
if data_args.validation_file is not None:
a =data_args.validation_file
a =data_args.train_file.split(""".""" )[-1]
a =load_dataset(
__UpperCamelCase , data_files=__UpperCamelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
a =load_dataset(
"""swag""" , """regular""" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
a =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
a =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
a =AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
a =[F'''ending{i}''' for i in range(4 )]
a ='''sent1'''
a ='''sent2'''
if data_args.max_seq_length is None:
a =tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
"""The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"""
""" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"""
""" override this default with `--block_size xxx`.""" )
a =1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
F'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
a =min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(UpperCAmelCase_ : int ):
a =[[context] * 4 for context in examples[context_name]]
a =examples[question_header_name]
a =[
[F'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(__UpperCamelCase )
]
# Flatten out
a =list(chain(*__UpperCamelCase ) )
a =list(chain(*__UpperCamelCase ) )
# Tokenize
a =tokenizer(
__UpperCamelCase , __UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , padding="""max_length""" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(__UpperCamelCase ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
a =raw_datasets['''train''']
if data_args.max_train_samples is not None:
a =min(len(__UpperCamelCase ) , data_args.max_train_samples )
a =train_dataset.select(range(__UpperCamelCase ) )
with training_args.main_process_first(desc="""train dataset map pre-processing""" ):
a =train_dataset.map(
__UpperCamelCase , batched=__UpperCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
a =raw_datasets['''validation''']
if data_args.max_eval_samples is not None:
a =min(len(__UpperCamelCase ) , data_args.max_eval_samples )
a =eval_dataset.select(range(__UpperCamelCase ) )
with training_args.main_process_first(desc="""validation dataset map pre-processing""" ):
a =eval_dataset.map(
__UpperCamelCase , batched=__UpperCamelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
a =(
default_data_collator
if data_args.pad_to_max_length
        else DataCollatorForMultipleChoice(tokenizer=__UpperCamelCase , pad_to_multiple_of=8 if training_args.fp16 else None )
)
# Metric
def compute_metrics(UpperCAmelCase_ : List[Any] ):
a =eval_predictions
a =np.argmax(__UpperCamelCase , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
a =Trainer(
model=__UpperCamelCase , args=__UpperCamelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__UpperCamelCase , data_collator=__UpperCamelCase , compute_metrics=__UpperCamelCase , )
# Training
if training_args.do_train:
a =None
if training_args.resume_from_checkpoint is not None:
a =training_args.resume_from_checkpoint
elif last_checkpoint is not None:
a =last_checkpoint
a =trainer.train(resume_from_checkpoint=__UpperCamelCase )
trainer.save_model() # Saves the tokenizer too for easy upload
a =train_result.metrics
a =(
data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCamelCase )
)
a =min(__UpperCamelCase , len(__UpperCamelCase ) )
trainer.log_metrics("""train""" , __UpperCamelCase )
trainer.save_metrics("""train""" , __UpperCamelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
a =trainer.evaluate()
a =data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCamelCase )
a =min(__UpperCamelCase , len(__UpperCamelCase ) )
trainer.log_metrics("""eval""" , __UpperCamelCase )
trainer.save_metrics("""eval""" , __UpperCamelCase )
a ={
'''finetuned_from''': model_args.model_name_or_path,
'''tasks''': '''multiple-choice''',
'''dataset_tags''': '''swag''',
'''dataset_args''': '''regular''',
'''dataset''': '''SWAG''',
'''language''': '''en''',
}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCamelCase )
else:
trainer.create_model_card(**__UpperCamelCase )
def _mp_fn( index ):
    # For xla_spawn (TPUs)
    main()
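# --- Flatten/unflatten demo (illustrative addition) ---
# The core trick shared by preprocess_function and DataCollatorForMultipleChoice:
# expand each example into its 4 candidate (context, ending) pairs, tokenize the
# flat list, then regroup the tokenized values 4-at-a-time per example.
_contexts = [["c0"] * 4, ["c1"] * 4]
_endings = [["a", "b", "c", "d"], ["e", "f", "g", "h"]]
_first = list(chain(*_contexts))     # 8 sentences, one per (example, choice)
_second = list(chain(*_endings))
assert len(_first) == len(_second) == 2 * 4
_flat = list(range(8))               # pretend these are tokenized rows
assert [_flat[i : i + 4] for i in range(0, len(_flat), 4)] == [[0, 1, 2, 3], [4, 5, 6, 7]]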
if __name__ == "__main__":
main()
| 711 |
from __future__ import annotations
def comp_and_swap( array: list[int] , index_a: int , index_b: int , direction: int )-> None:
    """simple docstring"""
    if (direction == 1 and array[index_a] > array[index_b]) or (
        direction == 0 and array[index_a] < array[index_b]
    ):
        array[index_a], array[index_b] = array[index_b], array[index_a]
def bitonic_merge( array: list[int] , low: int , length: int , direction: int )-> None:
    """simple docstring"""
    if length > 1:
        middle = int(length / 2 )
        for i in range(low , low + middle ):
            comp_and_swap(array , i , i + middle , direction )
        bitonic_merge(array , low , middle , direction )
        bitonic_merge(array , low + middle , middle , direction )
def bitonic_sort( array: list[int] , low: int , length: int , direction: int )-> None:
    """simple docstring"""
    if length > 1:
        middle = int(length / 2 )
        bitonic_sort(array , low , middle , 1 )
        bitonic_sort(array , low + middle , middle , 0 )
        bitonic_merge(array , low , length , direction )
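# --- Deterministic self-check (illustrative addition) ---
# Bitonic sort requires a power-of-two length; a quick sanity check before the
# interactive demo below.
_sample = [7, 3, 6, 1, 5, 2, 8, 4]
bitonic_sort(_sample, 0, len(_sample), 1)
assert _sample == [1, 2, 3, 4, 5, 6, 7, 8]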
if __name__ == "__main__":
    user_input = input('''Enter numbers separated by a comma:\n''').strip()
    unsorted = [int(item.strip()) for item in user_input.split(''',''')]
bitonic_sort(unsorted, 0, len(unsorted), 1)
print('''\nSorted array in ascending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
bitonic_merge(unsorted, 0, len(unsorted), 0)
print('''Sorted array in descending order is: ''', end='''''')
print(*unsorted, sep=''', ''')
| 321 | 0 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
def tpu_command_parser(subparsers=None ):
    if subparsers is not None:
        parser = subparsers.add_parser('tpu-config' , description=_description )
    else:
        parser = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
    config_args.add_argument(
        '--config_file' , type=str , default=None , help='Path to the config file to use for accelerate.' , )
    config_args.add_argument(
        '--tpu_name' , default=None , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
    config_args.add_argument(
        '--tpu_zone' , default=None , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
    pod_args = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
    pod_args.add_argument(
        '--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
    pod_args.add_argument(
        '--command_file' , default=None , help='The path to the file containing the commands to run on the pod on startup.' , )
    pod_args.add_argument(
        '--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
    pod_args.add_argument(
        '--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
    pod_args.add_argument(
        '--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
    pod_args.add_argument(
        '--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher )
    return parser
def tpu_command_launcher(args ):
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = 'git+https://github.com/huggingface/accelerate.git'
    elif args.accelerate_version == "latest":
        args.accelerate_version = 'accelerate -U'
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"
    if not args.command_file and not args.command:
        raise ValueError('You must specify either a command file or a command to run on the pod.' )
    if args.command_file:
        with open(args.command_file , 'r' ) as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ['cd /usr/share']
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = '; '.join(new_cmd )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['gcloud']
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd )}" )
        return
    subprocess.run(cmd )
    print('Successfully setup pod.' )
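# --- Example invocation (illustrative; names are placeholders) ---
# For a debug run such as:
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a --command "pip list" --debug
# the launcher above prints roughly:
#   Running gcloud compute tpus tpu-vm ssh my-tpu --zone us-central1-a --command cd /usr/share; pip list --worker all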
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args )
| 219 |
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class __A(BaseImageProcessor ):
    model_input_names = ['pixel_values']
def __init__( self , _snake_case = True , _snake_case = None , _snake_case = PILImageResampling.BICUBIC , _snake_case = True , _snake_case = None , _snake_case = True , _snake_case = 1 / 255 , _snake_case = True , _snake_case = IMAGENET_DEFAULT_MEAN , _snake_case = IMAGENET_DEFAULT_STD , **_snake_case , ) -> None:
'''simple docstring'''
super().__init__(**_snake_case )
__a = size if size is not None else {'''shortest_edge''': 224}
__a = get_size_dict(_snake_case , default_to_square=_snake_case )
__a = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
__a = get_size_dict(_snake_case , param_name='''crop_size''' )
__a = do_resize
__a = size
__a = resample
__a = do_center_crop
__a = crop_size
__a = do_rescale
__a = rescale_factor
__a = do_normalize
__a = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
__a = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case = PILImageResampling.BICUBIC , _snake_case = None , **_snake_case , ) -> np.ndarray:
'''simple docstring'''
__a = get_size_dict(_snake_case , default_to_square=_snake_case )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
__a = int((256 / 224) * size['''shortest_edge'''] )
__a = get_resize_output_image_size(_snake_case , size=_snake_case , default_to_square=_snake_case )
__a = {'''height''': output_size[0], '''width''': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" )
return resize(
_snake_case , size=(size_dict['''height'''], size_dict['''width''']) , resample=_snake_case , data_format=_snake_case , **_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case = None , **_snake_case , ) -> np.ndarray:
'''simple docstring'''
__a = get_size_dict(_snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F"""Size dict must have keys 'height' and 'width'. Got {size.keys()}""" )
return center_crop(_snake_case , size=(size['''height'''], size['''width''']) , data_format=_snake_case , **_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case = None , **_snake_case , ) -> np.ndarray:
'''simple docstring'''
return rescale(_snake_case , scale=_snake_case , data_format=_snake_case , **_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case , _snake_case , _snake_case = None , **_snake_case , ) -> np.ndarray:
'''simple docstring'''
return normalize(_snake_case , mean=_snake_case , std=_snake_case , data_format=_snake_case , **_snake_case )
def SCREAMING_SNAKE_CASE_ ( self , _snake_case , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = None , _snake_case = ChannelDimension.FIRST , **_snake_case , ) -> BatchFeature:
'''simple docstring'''
__a = do_resize if do_resize is not None else self.do_resize
__a = resample if resample is not None else self.resample
__a = do_center_crop if do_center_crop is not None else self.do_center_crop
__a = do_rescale if do_rescale is not None else self.do_rescale
__a = rescale_factor if rescale_factor is not None else self.rescale_factor
__a = do_normalize if do_normalize is not None else self.do_normalize
__a = image_mean if image_mean is not None else self.image_mean
__a = image_std if image_std is not None else self.image_std
__a = size if size is not None else self.size
__a = get_size_dict(_snake_case , default_to_square=_snake_case )
__a = crop_size if crop_size is not None else self.crop_size
__a = get_size_dict(_snake_case , param_name='''crop_size''' )
__a = make_list_of_images(_snake_case )
if not valid_images(_snake_case ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
__a = [to_numpy_array(_snake_case ) for image in images]
if do_resize:
__a = [self.resize(_snake_case , _snake_case , _snake_case ) for image in images]
if do_center_crop:
__a = [self.center_crop(_snake_case , _snake_case ) for image in images]
if do_rescale:
__a = [self.rescale(_snake_case , _snake_case ) for image in images]
if do_normalize:
__a = [self.normalize(_snake_case , _snake_case , _snake_case ) for image in images]
__a = [to_channel_dimension_format(_snake_case , _snake_case ) for image in images]
__a = {'''pixel_values''': images}
        return BatchFeature(data=_snake_case , tensor_type=_snake_case )
| 219 | 1 |
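# --- Usage sketch for the image-processor file above (illustrative) ---
# Its preprocessing chain (resize via shortest edge * 256/224 -> center crop ->
# rescale -> normalize with the ImageNet default stats) matches LeViT-style
# processors in transformers, so the released class gives a runnable demo:
# import numpy as np
# from transformers import LevitImageProcessor
# processor = LevitImageProcessor()
# image = np.random.randint(0, 256, (512, 384, 3), dtype=np.uint8)  # dummy HWC image
# batch = processor(image, return_tensors="np")
# print(batch["pixel_values"].shape)  # expected (1, 3, 224, 224) with the defaults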
'''simple docstring'''
import unittest
from transformers import is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, require_torch, slow
if is_flax_available():
import optax
from flax.training.common_utils import onehot
    from transformers import AutoTokenizer, FlaxMT5ForConditionalGeneration
    from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
@require_torch
@require_sentencepiece
@require_tokenizers
@require_flax
class MT5IntegrationTest(unittest.TestCase ):
    """simple docstring"""
    @slow
    def test_small_integration_test( self ):
        model = FlaxMT5ForConditionalGeneration.from_pretrained("""google/mt5-small""" )
        tokenizer = AutoTokenizer.from_pretrained("""google/mt5-small""" )
        input_ids = tokenizer("""Hello there""" , return_tensors="""np""" ).input_ids
        labels = tokenizer("""Hi I am""" , return_tensors="""np""" ).input_ids
        decoder_input_ids = shift_tokens_right(labels , model.config.pad_token_id , model.config.decoder_start_token_id )
        logits = model(input_ids , decoder_input_ids=decoder_input_ids ).logits
        loss = optax.softmax_cross_entropy(logits , onehot(labels , logits.shape[-1] ) ).mean()
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1E-4 )
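# --- What shift_tokens_right does (illustrative) ---
# Decoder inputs are the labels shifted one position to the right with the
# decoder start token prepended, e.g. (token ids are made up):
#   labels            = [[5, 6, 7]]
#   decoder_input_ids = [[<start>, 5, 6]]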
| 398 |
'''simple docstring'''
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class BackboneUtilsTester(unittest.TestCase):
    def test_get_aligned_output_features_output_indices(self):
        stage_names = ["a", "b", "c"]

        # Defaults to last layer if both are None
        out_features, out_indices = get_aligned_output_features_output_indices(None, None, stage_names)
        self.assertEqual(out_features, ["c"])
        self.assertEqual(out_indices, [2])

        # Out indices set to match out features
        out_features, out_indices = get_aligned_output_features_output_indices(["a", "c"], None, stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features set to match out indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [0, 2], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [0, 2])

        # Out features selected from negative indices
        out_features, out_indices = get_aligned_output_features_output_indices(None, [-3, -1], stage_names)
        self.assertEqual(out_features, ["a", "c"])
        self.assertEqual(out_indices, [-3, -1])

    def test_verify_out_features_out_indices(self):
        # Stage names must be set
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), None)

        # Out features must be a list
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(("a", "b"), (0, 1), ["a", "b"])

        # Out features must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 1), ["a"])

        # Out indices must be a list or tuple
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, 0, ["a", "b"])

        # Out indices must be a subset of stage names
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(None, (0, 1), ["a"])

        # Out features and out indices must be the same length
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0,), ["a", "b", "c"])

        # Out features should match out indices
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["a", "b"], (0, 2), ["a", "b", "c"])

        # Out features and out indices should be in order
        with self.assertRaises(ValueError):
            verify_out_features_out_indices(["b", "a"], (0, 1), ["a", "b"])

        # Check passes with valid inputs
        verify_out_features_out_indices(["a", "b", "d"], (0, 1, -1), ["a", "b", "c", "d"])

    def test_backbone_mixin(self):
        backbone = BackboneMixin()

        backbone.stage_names = ["a", "b", "c"]
        backbone._out_features = ["a", "c"]
        backbone._out_indices = [0, 2]

        # Check that the output features and indices are set correctly
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [0, 2])

        # Check out features and indices are updated correctly
        backbone.out_features = ["a", "b"]
        self.assertEqual(backbone.out_features, ["a", "b"])
        self.assertEqual(backbone.out_indices, [0, 1])

        backbone.out_indices = [-3, -1]
        self.assertEqual(backbone.out_features, ["a", "c"])
        self.assertEqual(backbone.out_indices, [-3, -1])
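
# Simplified sketch (an illustrative addition, not the transformers implementation) of the
# alignment rule the first test above exercises:
def _align_features_indices(out_features, out_indices, stage_names):
    if out_features is None and out_indices is None:
        return [stage_names[-1]], [len(stage_names) - 1]  # default to the last stage
    if out_features is None:
        # keep the caller's (possibly negative) indices and derive the names from them
        return [stage_names[i] for i in out_indices], list(out_indices)
    if out_indices is None:
        return list(out_features), [stage_names.index(f) for f in out_features]
    return list(out_features), list(out_indices)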
| 398 | 1 |
'''simple docstring'''
def is_isogram(string: str) -> bool:
    """Return True if no letter occurs more than once (case-insensitive)."""
    if not all(x.isalpha() for x in string):
        raise ValueError('String must only contain alphabetic characters.')
    letters = sorted(string.lower())
    return len(letters) == len(set(letters))
if __name__ == "__main__":
    input_str = input('Enter a string ').strip()
    isogram = is_isogram(input_str)
print(f"{input_str} is {'an' if isogram else 'not an'} isogram.")
| 396 | '''simple docstring'''
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = 'docs/source/en/_toctree.yml'
def clean_doc_toc(doc_list):
    """Cleans one section of the documentation's table of content: merges duplicate entries,
    sorts the remaining entries by title, and pins the "Overview" entry first."""
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1

        if doc["title"].lower() == "overview":
            overview_doc.append({'local': doc['local'], 'title': doc['title']})
        else:
            new_doc_list.append(doc)

    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"""{duplicate_key} is present several times in the documentation table of content at """
                '`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
                'others.')
        # Only add this once
        new_doc.append({'local': duplicate_key, 'title': titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")

    overview_doc.extend(new_doc)

    # Sort
    return overview_doc
def check_scheduler_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding='utf-8') as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1

    api_doc = content[api_idx]['sections']

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1

    scheduler_doc = api_doc[scheduler_idx]['sections']
    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]['sections'] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC, 'w', encoding='utf-8') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.')


def check_pipeline_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding='utf-8') as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1

    api_doc = content[api_idx]['sections']

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]['sections']
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc['section']
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc['section'] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]['sections'] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]['sections'] = api_doc
            with open(PATH_TO_TOC, 'w', encoding='utf-8') as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                'The model doc part of the table of content is not properly sorted, run `make style` to fix this.')
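

# Illustrative demo of clean_doc_toc (an addition for clarity, not part of the original
# script): duplicates are merged, entries are sorted by title, and "Overview" stays first.
def _demo_clean_doc_toc():
    docs = [
        {'local': 'overview', 'title': 'Overview'},
        {'local': 'ddim', 'title': 'DDIM'},
        {'local': 'ddim', 'title': 'DDIM'},  # duplicate key with an identical title: merged
        {'local': 'ancestral', 'title': 'Ancestral'},
    ]
    assert clean_doc_toc(docs) == [
        {'local': 'overview', 'title': 'Overview'},
        {'local': 'ancestral', 'title': 'Ancestral'},
        {'local': 'ddim', 'title': 'DDIM'},
    ]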
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.')
    args = parser.parse_args()

    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
| 396 | 1 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius(n: int) -> int:
    """Möbius function: 0 if n has a squared prime factor, otherwise (-1) ** k
    where k is the number of prime factors."""
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
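
# Self-contained cross-check (an illustrative addition) of the first ten Möbius values,
# using a tiny trial-division factorization instead of the `maths` helpers above:
def _mu_reference(n: int) -> int:
    factors = []
    d = 2
    while d * d <= n:
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n > 1:
        factors.append(n)
    if len(set(factors)) != len(factors):  # a squared prime factor is present
        return 0
    return -1 if len(factors) % 2 else 1


assert [_mu_reference(n) for n in range(1, 11)] == [1, -1, -1, 0, -1, 1, -1, 0, 0, 1]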
| 714 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}
class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
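

# Round-trip sketch (an illustrative addition), assuming PretrainedConfig's usual
# to_dict/from_dict serialization behavior:
def _demo_config_roundtrip():
    config = CamembertConfig(vocab_size=1000, hidden_size=64, num_hidden_layers=2, num_attention_heads=2)
    restored = CamembertConfig.from_dict(config.to_dict())
    assert restored.vocab_size == 1000 and restored.hidden_size == 64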
| 13 | 0 |
"""simple docstring"""
import copy
import unittest
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import (
        MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
        MODEL_FOR_QUESTION_ANSWERING_MAPPING,
        MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
        MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
        LayoutLMv3Config,
        LayoutLMv3ForQuestionAnswering,
        LayoutLMv3ForSequenceClassification,
        LayoutLMv3ForTokenClassification,
        LayoutLMv3Model,
    )
    from transformers.models.layoutlmv3.modeling_layoutlmv3 import LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        num_channels=3,
        image_size=4,
        patch_size=2,
        text_seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=36,
        num_hidden_layers=3,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        coordinate_size=6,
        shape_size=6,
        num_labels=3,
        num_choices=4,
        scope=None,
        range_bbox=1000,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.text_seq_length = text_seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.range_bbox = range_bbox

        # LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
        self.text_seq_length = text_seq_length
        self.image_seq_length = (image_size // patch_size) ** 2 + 1
        self.seq_length = self.text_seq_length + self.image_seq_length

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.text_seq_length], self.vocab_size)

        bbox = ids_tensor([self.batch_size, self.text_seq_length, 4], self.range_bbox)
        # Ensure that bbox is legal
        for i in range(bbox.shape[0]):
            for j in range(bbox.shape[1]):
                if bbox[i, j, 3] < bbox[i, j, 1]:
                    t = bbox[i, j, 3]
                    bbox[i, j, 3] = bbox[i, j, 1]
                    bbox[i, j, 1] = t
                if bbox[i, j, 2] < bbox[i, j, 0]:
                    t = bbox[i, j, 2]
                    bbox[i, j, 2] = bbox[i, j, 0]
                    bbox[i, j, 0] = t

        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.text_seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.text_seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.text_seq_length], self.num_labels)

        config = LayoutLMv3Config(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            coordinate_size=self.coordinate_size,
            shape_size=self.shape_size,
            input_size=self.image_size,
            patch_size=self.patch_size,
        )

        return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels

    def create_and_check_model(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3Model(config=config)
        model.to(torch_device)
        model.eval()

        # text + image
        result = model(input_ids, pixel_values=pixel_values)
        result = model(
            input_ids, bbox=bbox, pixel_values=pixel_values, attention_mask=input_mask, token_type_ids=token_type_ids
        )
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values, token_type_ids=token_type_ids)
        result = model(input_ids, bbox=bbox, pixel_values=pixel_values)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # text only
        result = model(input_ids)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.text_seq_length, self.hidden_size)
        )

        # image only
        result = model(pixel_values=pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.image_seq_length, self.hidden_size)
        )

    def create_and_check_for_sequence_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=sequence_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        config.num_labels = self.num_labels
        model = LayoutLMv3ForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            labels=token_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.text_seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
    ):
        model = LayoutLMv3ForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            bbox=bbox,
            pixel_values=pixel_values,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            bbox,
            pixel_values,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "bbox": bbox,
            "pixel_values": pixel_values,
            "token_type_ids": token_type_ids,
            "attention_mask": input_mask,
        }
        return config, inputs_dict
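

# Standalone check (an illustrative addition) of the sequence-length arithmetic the tester
# above relies on: text tokens plus image patches plus one CLS token.
def _demo_seq_length():
    image_size, patch_size, text_seq_length = 4, 2, 7
    image_seq_length = (image_size // patch_size) ** 2 + 1  # 4 patches + CLS = 5
    assert text_seq_length + image_seq_length == 12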
@require_torch
class LayoutLMv3ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_pruning = False
    test_torchscript = False
    test_mismatched_shapes = False

    all_model_classes = (
        (
            LayoutLMv3Model,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3ForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"document-question-answering": LayoutLMv3ForQuestionAnswering, "feature-extraction": LayoutLMv3Model}
        if is_torch_available()
        else {}
    )

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        return True

    def setUp(self):
        self.model_tester = LayoutLMv3ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=LayoutLMv3Config, hidden_size=37)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)
        if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
            inputs_dict = {
                k: v.unsqueeze(1).expand(-1, self.model_tester.num_choices, -1).contiguous()
                if isinstance(v, torch.Tensor) and v.ndim > 1
                else v
                for k, v in inputs_dict.items()
            }

        if return_labels:
            if model_class in get_values(MODEL_FOR_MULTIPLE_CHOICE_MAPPING):
                inputs_dict["labels"] = torch.ones(self.model_tester.batch_size, dtype=torch.long, device=torch_device)
            elif model_class in get_values(MODEL_FOR_QUESTION_ANSWERING_MAPPING):
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
            elif model_class in [
                *get_values(MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING),
            ]:
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.text_seq_length),
                    dtype=torch.long,
                    device=torch_device,
                )

        return inputs_dict

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = LayoutLMv3Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
class LayoutLMv3ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return LayoutLMv3ImageProcessor(apply_ocr=False) if is_vision_available() else None

    @slow
    def test_inference_no_head(self):
        model = LayoutLMv3Model.from_pretrained("microsoft/layoutlmv3-base").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        input_ids = torch.tensor([[1, 2]])
        bbox = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]]).unsqueeze(0)

        # forward pass
        outputs = model(
            input_ids=input_ids.to(torch_device),
            bbox=bbox.to(torch_device),
            pixel_values=pixel_values.to(torch_device),
        )

        # verify the logits
        expected_shape = torch.Size((1, 199, 768))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0529, 0.3618, 0.1632], [-0.1587, -0.1667, -0.0400], [-0.1557, -0.1671, -0.0505]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4)) | 626 |
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
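

# Background sketch (an illustrative addition): the vanilla DDPM posterior variance that the
# scheduler's variance options build on. Numbers here use a linear beta schedule, so they
# will NOT match the UnCLIP expected values in the test below (UnCLIP uses a squared-cosine
# beta schedule).
def _demo_posterior_variance():
    import numpy as np

    T = 1000
    betas = np.linspace(1e-4, 0.02, T)
    alphas_cumprod = np.cumprod(1.0 - betas)
    alphas_cumprod_prev = np.concatenate([[1.0], alphas_cumprod[:-1]])
    variance = betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod)
    return variance[0], variance[487], variance[999]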
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "variance_type": "fixed_small_log",
            "clip_sample": True,
            "clip_sample_range": 1.0,
            "prediction_type": "epsilon",
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_variance_type(self):
        for variance in ["fixed_small_log", "learned_range"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_clip_sample_range(self):
        for clip_sample_range in [1, 5, 10, 20]:
            self.check_over_configs(clip_sample_range=clip_sample_range)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for time_step in [0, 500, 999]:
            for prev_timestep in [None, 5, 100, 250, 500, 750]:
                if prev_timestep is not None and prev_timestep >= time_step:
                    continue
                self.check_over_forward(time_step=time_step, prev_timestep=prev_timestep)

    def test_variance_fixed_small_log(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="fixed_small_log")
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 1.0000e-10)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0549625)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.9994987)) < 1e-5

    def test_variance_learned_range(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type="learned_range")
        scheduler = scheduler_class(**scheduler_config)

        predicted_variance = 0.5

        assert scheduler._get_variance(1, predicted_variance=predicted_variance) - -10.1712790 < 1e-5
        assert scheduler._get_variance(487, predicted_variance=predicted_variance) - -5.7998052 < 1e-5
        assert scheduler._get_variance(999, predicted_variance=predicted_variance) - -0.0010011 < 1e-5

    def test_full_loop(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 252.2682495) < 1e-2
        assert abs(result_mean.item() - 0.3284743) < 1e-3

    def test_full_loop_skip_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        scheduler.set_timesteps(25)

        timesteps = scheduler.timesteps

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for i, t in enumerate(timesteps):
            # 1. predict noise residual
            residual = model(sample, t)

            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual, t, sample, prev_timestep=prev_timestep, generator=generator
            ).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.2044983) < 1e-2
        assert abs(result_mean.item() - 0.3362038) < 1e-3

    def test_trained_betas(self):
        pass

    def test_add_noise_device(self):
pass | 626 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type,
    generator_name_or_path: str,
    question_encoder_name_or_path: str,
    dest_dir: Path,
    config_name_or_path: str = None,
    generator_tokenizer_name_or_path: str = None,
    question_encoder_tokenizer_name_or_path: str = None,
):
    if config_name_or_path is None:
        config_name_or_path = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"

    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path

    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path

    model_class = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration

    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path)
    gen_config = AutoConfig.from_pretrained(generator_name_or_path)
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path)

    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config

    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path, generator_name_or_path, config=rag_config
    )
    rag_model.save_pretrained(dest_dir)

    # Sanity check.
    model_class.from_pretrained(dest_dir)

    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path)
    gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/")
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path)
    question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/")
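

# Hypothetical programmatic use of consolidate (an illustrative addition; the checkpoint
# names below are examples, and running this downloads them):
def _example_consolidate():
    dest = Path("./rag-sequence-consolidated")
    dest.mkdir(exist_ok=True)
    consolidate(
        "rag_sequence",
        "facebook/bart-large",  # generator
        "facebook/dpr-question_encoder-single-nq-base",  # question encoder
        dest,
    )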
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
    args = parser.parse_args()

    dest_dir = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 160 |
'''simple docstring'''
import functools
import gc
import inspect
import torch
from .imports import is_npu_available, is_xpu_available
def release_memory(*objects):
    """Releases memory by setting each object to `None`, then triggering garbage collection
    and emptying the relevant accelerator cache. Returns the list of `None` placeholders."""
    if not isinstance(objects, list):
        objects = list(objects)
    for i in range(len(objects)):
        objects[i] = None
    gc.collect()
    if is_xpu_available():
        torch.xpu.empty_cache()
    elif is_npu_available():
        torch.npu.empty_cache()
    else:
        torch.cuda.empty_cache()
    return objects


def should_reduce_batch_size(exception: Exception) -> bool:
    """Checks whether `exception` is one of the known out-of-memory style failures."""
    _statements = [
        "CUDA out of memory.",  # CUDA OOM
        "cuDNN error: CUDNN_STATUS_NOT_SUPPORTED.",  # CUDNN SNAFU
        "DefaultCPUAllocator: can't allocate memory",  # CPU OOM
    ]
    if isinstance(exception, RuntimeError) and len(exception.args) == 1:
        return any(err in exception.args[0] for err in _statements)
    return False


def find_executable_batch_size(function: callable = None, starting_batch_size: int = 128):
    """Decorator that retries `function` with the batch size halved each time it fails with
    an out-of-memory style error, starting from `starting_batch_size`."""
    if function is None:
        return functools.partial(find_executable_batch_size, starting_batch_size=starting_batch_size)

    batch_size = starting_batch_size

    def decorator(*args, **kwargs):
        nonlocal batch_size
        gc.collect()
        if is_xpu_available():
            torch.xpu.empty_cache()
        elif is_npu_available():
            torch.npu.empty_cache()
        else:
            torch.cuda.empty_cache()
        params = list(inspect.signature(function).parameters.keys())
        # Guard against user error
        if len(params) < (len(args) + 1):
            arg_str = ", ".join([f"{arg}={value}" for arg, value in zip(params[1:], args[1:])])
            raise TypeError(
                f"Batch size was passed into `{function.__name__}` as the first argument when called. "
                f"Remove this as the decorator already does so: `{function.__name__}({arg_str})`"
            )
        while True:
            if batch_size == 0:
                raise RuntimeError("No executable batch size found, reached zero.")
            try:
                return function(batch_size, *args, **kwargs)
            except Exception as e:
                if should_reduce_batch_size(e):
                    gc.collect()
                    if is_xpu_available():
                        torch.xpu.empty_cache()
                    elif is_npu_available():
                        torch.npu.empty_cache()
                    else:
                        torch.cuda.empty_cache()
                    batch_size //= 2
                else:
                    raise

    return decorator
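

# Usage sketch (an illustrative addition): the decorated function receives the batch size
# as its first argument and is retried with half the batch size on OOM-style RuntimeErrors.
def _demo_find_executable_batch_size():
    @find_executable_batch_size(starting_batch_size=4)
    def train(batch_size):
        if batch_size > 1:  # stand-in for "this batch size runs out of memory"
            raise RuntimeError("CUDA out of memory.")
        return batch_size

    assert train() == 1  # 4 -> 2 -> 1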
| 160 | 1 |
"""simple docstring"""
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Saves a randomly initialized (untrained) seq2seq model built from a pretrained config."""
    cfg = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(cfg)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
| 389 |
"""simple docstring"""
from collections.abc import Sequence
def max_subsequence_sum(nums: Sequence[int] | None = None) -> int:
    """Return the maximum possible sum amongst all non-empty subsequences.

    Raises:
      ValueError: when nums is empty.

    >>> max_subsequence_sum([1, 2, 3, 4, -2])
    10
    >>> max_subsequence_sum([-2, -3, -1, -4, -6])
    -1
    """
    if nums is None or not nums:
        raise ValueError("Input sequence should not be empty")

    ans = nums[0]
    for i in range(1, len(nums)):
        num = nums[i]
        ans = max(ans, ans + num, num)

    return ans
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Try on a sample input from the user
    n = int(input('Enter number of elements : ').strip())
    array = list(map(int, input('\nEnter the numbers : ').strip().split()))[:n]
    print(max_subsequence_sum(array))
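
# Cross-check (an illustrative addition): for non-contiguous subsequences the optimum has a
# closed form - the sum of all positive elements, or the maximum element if none are positive.
def _brute_force_subsequence_sum(nums):
    positives = [x for x in nums if x > 0]
    return sum(positives) if positives else max(nums)


def _check_against_closed_form():
    import random

    random.seed(0)
    for _ in range(100):
        xs = [random.randint(-10, 10) for _ in range(random.randint(1, 8))]
        assert max_subsequence_sum(xs) == _brute_force_subsequence_sum(xs)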
| 389 | 1 |
from __future__ import annotations
from typing import Any
class Graph:
    def __init__(self, num_of_nodes: int) -> None:
        """
        Arguments:
            num_of_nodes - the number of nodes in the graph
        Attributes:
            m_num_of_nodes - the number of nodes in the graph.
            m_edges - the list of edges.
            m_component - the dictionary which stores the index of the component which
            a node belongs to.
        """
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}

    def add_edge(self, u_node: int, v_node: int, weight: int) -> None:
        """Adds an edge in the format [first, second, edge weight] to the graph."""
        self.m_edges.append([u_node, v_node, weight])

    def find_component(self, u_node: int) -> int:
        """Returns the root of the component a given node belongs to."""
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node])

    def set_component(self, u_node: int) -> None:
        """Propagates the new component index throughout the component of a given node."""
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k)

    def union(self, component_size: list[int], u_node: int, v_node: int) -> None:
        """Attaches the smaller of the two components to the larger one, merging the two
        nodes' components into a single component."""
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node)

        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node)
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node)

    def boruvka(self) -> None:
        """Performs Borůvka's algorithm to find the minimum spanning tree."""
        # Initialize additional lists required for the algorithm.
        component_size = []
        mst_weight = 0

        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes

        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes):
            self.m_component.update({node: node})
            component_size.append(1)

        num_of_components = self.m_num_of_nodes

        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge

                u_component = self.m_component[u]
                v_component = self.m_component[v]

                if u_component != v_component:
                    # Keep the cheapest edge leaving each of the two components.
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]

            for edge in minimum_weight_edge:
                if isinstance(edge, list):
                    u, v, w = edge

                    u_component = self.m_component[u]
                    v_component = self.m_component[v]

                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size, u_component, v_component)
                        print(f'Added edge [{u} - {v}]\nAdded weight: {w}\n')
                        num_of_components -= 1

            minimum_weight_edge = [-1] * self.m_num_of_nodes

        print(f'The total weight of the minimal spanning tree is: {mst_weight}')


def test_vector() -> None:
    """Placeholder for the module's doctests (the original doctest body was elided)."""


if __name__ == "__main__":
    import doctest

    doctest.testmod()
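
    # Example run (an illustrative addition) on a small weighted cycle; the MST keeps the
    # three cheapest edges for a total weight of 6.
    g = Graph(4)
    g.add_edge(0, 1, 1)
    g.add_edge(1, 2, 2)
    g.add_edge(2, 3, 3)
    g.add_edge(0, 3, 4)
    g.boruvka()  # prints the chosen edges and "The total weight ... is: 6"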
| 294 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_compatible_try_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_try_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_compatible_extension_try_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_try_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            _ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])


def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


def test_write_with_features():
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
def test_key_datatype(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True
    ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_duplicate_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True
    ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({"col_1": "foo", "col_2": 1}, key=10)
            writer.write({"col_1": "bar", "col_2": 2}, key=10)
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_write_with_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True
    ) as writer:
        writer.write({"col_1": "foo", "col_2": 1}, key=1)
        writer.write({"col_1": "bar", "col_2": 2}, key=2)
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_batch(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
        writer.write_batch({"col_1": [], "col_2": []})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_table(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_row(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]}))
        writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)


def get_base_dtype(arr_type):
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value


@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())])
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype


@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype

    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()


@pytest.mark.parametrize("raise_exception", [False, True])
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
    path = str(tmp_path / "dataset-train.arrow")
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed


def test_arrow_writer_with_filesystem(mockfs):
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)


def test_parquet_writer_write():
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}


@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None


def test_always_nullable():
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])

    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)

    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
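

# Minimal illustration (an addition) of the type/try_type distinction exercised above,
# using plain pyarrow without the TypedSequence wrapper:
def _demo_try_type():
    def try_cast(values, try_type):
        try:
            return pa.array(values, type=try_type)
        except (TypeError, pa.lib.ArrowInvalid):
            return pa.array(values)  # fall back to type inference

    assert try_cast([1, 2, 3], pa.int32()).type == pa.int32()
    assert try_cast(["foo", "bar"], pa.int64()).type == pa.string()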
| 294 | 1 |
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_sql_keep_in_memory(keep_in_memory, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = SqlDatasetReader(
            "dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_sql_dataset(dataset, expected_features)


@require_sqlalchemy
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_sql_features(features, sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, features=features, cache_dir=cache_dir).read()
    _check_sql_dataset(dataset, expected_features)


def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row


@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()

    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)

    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2


@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write() | 458 | """simple docstring"""
import os
import pytest
from attr import dataclass
SCREAMING_SNAKE_CASE__:List[str] = """us-east-1""" # defaults region
@dataclass
class snake_case__ :
_snake_case : str
_snake_case : Optional[Any] = """arn:aws:iam::558105141721:role/sagemaker_execution_role"""
_snake_case : Optional[Any] = {
"""task_name""": """mnli""",
"""per_device_train_batch_size""": 16,
"""per_device_eval_batch_size""": 16,
"""do_train""": True,
"""do_eval""": True,
"""do_predict""": True,
"""output_dir""": """/opt/ml/model""",
"""overwrite_output_dir""": True,
"""max_steps""": 500,
"""save_steps""": 5_500,
}
_snake_case : List[str] = {**hyperparameters, """max_steps""": 1_000}
@property
def a__ ( self ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
    @property
    def base_job_name(self):
        return f"{self.framework}-transformers-test"
    @property
    def test_path(self):
        return f"./tests/sagemaker/scripts/{self.framework}"
@property
    def image_uri(self):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="class" )
def _lowerCamelCase( a ):
__a = SageMakerTestEnvironment(framework=request.cls.framework )
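# Illustrative only (not part of the original conftest): how these properties
# would typically feed a SageMaker estimator. `sagemaker.huggingface.HuggingFace`
# is the real SDK class, but the entry point and instance type below are
# placeholder assumptions.
def _example_estimator(env: SageMakerTestEnvironment):
    from sagemaker.huggingface import HuggingFace

    return HuggingFace(
        entry_point="run_glue.py",  # placeholder script name
        source_dir=env.test_path,
        role=env.role,
        image_uri=env.image_uri,
        instance_count=1,
        instance_type="ml.p3.2xlarge",
        base_job_name=env.base_job_name,
        hyperparameters=env.hyperparameters,
        metric_definitions=env.metric_definitions,
    )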
| 528 | 0 |
'''simple docstring'''
import os
import posixpath
import shutil
import uuid
from dataclasses import dataclass
from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union
import numpy as np
import pyarrow as pa
import datasets
from datasets.arrow_writer import ArrowWriter, ParquetWriter
from datasets.config import MAX_SHARD_SIZE
from datasets.filesystems import (
is_remote_filesystem,
rename,
)
from datasets.iterable_dataset import _BaseExamplesIterable
from datasets.utils.py_utils import convert_file_size_to_int
logger = datasets.utils.logging.get_logger(__name__)
if TYPE_CHECKING:
import pyspark
@dataclass
class SparkConfig(datasets.BuilderConfig):
    """BuilderConfig for a dataset built from a Spark DataFrame."""

    features: Optional[datasets.Features] = None
def _generate_iterable_examples(
    df: "pyspark.sql.DataFrame",
    partition_order: List[int],
):
    import pyspark

    def generate_fn():
        df_with_partition_id = df.select("*", pyspark.sql.functions.spark_partition_id().alias("part_id"))
        for partition_id in partition_order:
            # Collect one partition at a time to bound driver memory usage.
            partition_df = df_with_partition_id.select("*").where(f"part_id = {partition_id}").drop("part_id")
            rows = partition_df.collect()
            row_id = 0
            for row in rows:
                yield f"{partition_id}_{row_id}", row.asDict()
                row_id += 1

    return generate_fn
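# Illustrative only (assumes a local pyspark installation; column names are
# placeholders): the generator above yields a stable "<partition>_<row>" key
# together with each row as a plain dict.
def _example_iterate_partitions():
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.master("local[2]").getOrCreate()
    df = spark.createDataFrame([(1, "a"), (2, "b")], ["id", "text"])
    gen = _generate_iterable_examples(df, list(range(df.rdd.getNumPartitions())))
    for key, record in gen():
        print(key, record)  # e.g. 0_0 {'id': 1, 'text': 'a'}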
class SparkExamplesIterable(_BaseExamplesIterable):
    """Iterates over the rows of a Spark DataFrame, one partition at a time."""
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        partition_order=None,
    ):
        self.df = df
        self.partition_order = partition_order or range(self.df.rdd.getNumPartitions())
        self.generate_examples_fn = _generate_iterable_examples(self.df, self.partition_order)

    def __iter__(self):
        yield from self.generate_examples_fn()

    def shuffle_data_sources(self, generator: np.random.Generator) -> "SparkExamplesIterable":
        partition_order = list(range(self.df.rdd.getNumPartitions()))
        generator.shuffle(partition_order)
        return SparkExamplesIterable(self.df, partition_order=partition_order)

    def shard_data_sources(self, worker_id: int, num_workers: int) -> "SparkExamplesIterable":
        partition_order = self.split_shard_indices_by_worker(worker_id, num_workers)
        return SparkExamplesIterable(self.df, partition_order=partition_order)
@property
    def n_shards(self) -> int:
        return len(self.partition_order)
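    # Note (editorial): shuffling and sharding operate on whole Spark
    # partitions rather than individual rows, so a shuffled iterable visits
    # partitions in a permuted order while keeping per-partition reads
    # sequential and cheap.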
class Spark(datasets.DatasetBuilder):
    """Builds a `datasets` dataset directly from a Spark DataFrame."""

    BUILDER_CONFIG_CLASS = SparkConfig
    def __init__(
        self,
        df: "pyspark.sql.DataFrame",
        cache_dir: str = None,
        working_dir: str = None,
        **config_kwargs,
    ):
        import pyspark

        self._spark = pyspark.sql.SparkSession.builder.getOrCreate()
        self.df = df
        self._working_dir = working_dir
        super().__init__(
            cache_dir=cache_dir,
            config_name=str(self.df.semanticHash()),
            **config_kwargs,
        )
    def _validate_cache_dir(self):
        # Define this so that we don't reference self in create_cache_and_write_probe, which will result in a
        # pickling error due to pickling the SparkContext.
        cache_dir = self._cache_dir

        # Returns the path of the created file.
        def create_cache_and_write_probe(context):
            # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories
            # already exist.
            os.makedirs(cache_dir, exist_ok=True)
            probe_file = os.path.join(cache_dir, "fs_test" + uuid.uuid4().hex)
            # Opening the file in append mode will create a new file unless it already exists, in which case it will
            # not change the file contents.
            open(probe_file, "a")
            return [probe_file]

        if self._spark.conf.get("spark.master", "").startswith("local"):
            return

        # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS
        # accessible to the driver.
        # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error.
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1).mapPartitions(create_cache_and_write_probe).collect()
            )
            if os.path.isfile(probe[0]):
                return

        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )
    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager: datasets.download.download_manager.DownloadManager):
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]
    def _repartition_df_if_needed(self, max_shard_size):
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)
    def _prepare_split_single(
        self,
        fpath: str,
        file_format: str,
        max_shard_size: int,
    ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]:
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = posixpath.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)
    def _prepare_split(
        self,
        split_generator: "datasets.SplitGenerator",
        file_format: str = "arrow",
        max_shard_size: Optional[Union[str, int]] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)
        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (
                num_examples,
                num_bytes,
                num_shards,
                shard_lengths,
            ) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will result
            # in a pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(
                task_id: int,
                shard_id: int,
                global_shard_id: int,
            ):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )
    def _get_examples_iterable_for_split(
        self,
        split_generator: "datasets.SplitGenerator",
    ) -> SparkExamplesIterable:
        return SparkExamplesIterable(self.df)
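# Illustrative only (not part of the original module): `Dataset.from_spark` is
# the public `datasets` entry point that drives this builder; this sketch
# assumes a recent `datasets` release and a local SparkSession.
def _example_from_spark():
    from datasets import Dataset
    from pyspark.sql import SparkSession

    spark = SparkSession.builder.master("local[2]").getOrCreate()
    df = spark.createDataFrame([(1, "a"), (2, "b")], ["id", "text"])
    return Dataset.from_spark(df)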
| 709 |
'''simple docstring'''
import unittest
from transformers import (
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)
    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
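# Illustrative only (not part of the original tests): minimal end-user call of
# the pipeline exercised above; the checkpoint is a tiny random test model, so
# its output text is meaningless.
def _example_text2text_pipeline():
    from transformers import pipeline

    generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random")
    return generator("Something there", do_sample=False)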
| 172 | 0 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)
def accuracy(out, labels):
    outputs = np.argmax(out, axis=1)
    return np.sum(outputs == labels)
def load_rocstories_dataset(dataset_path):
    """Output a list of tuples (story, 1st continuation, 2nd continuation, label)"""
    with open(dataset_path, encoding='utf_8') as f:
        f_csv = csv.reader(f)
        output = []
        next(f_csv)  # skip the first line
        for line in tqdm(f_csv):
            output.append((' '.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
    return output
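# Illustrative only: each tuple produced above looks like
#   ("sentence1 ... sentence4", "first continuation", "second continuation", 0)
# where the final element is the zero-based index of the correct continuation.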
def pre_process_datasets(encoded_datasets, input_len, cap_length, start_token, delimiter_token, clf_token):
    """Pre-process datasets containing lists of tuples (story, 1st continuation, 2nd continuation, label)

    into Transformer inputs of shape (n_batch, n_alternative, length) where, for each batch and continuation:
    input_ids[batch, alternative, :] = [start_token] + story[:cap_length] + [delimiter_token] + cont[:cap_length] + [clf_token]
    """
    tensor_datasets = []
    for dataset in encoded_datasets:
        n_batch = len(dataset)
        input_ids = np.zeros((n_batch, 2, input_len), dtype=np.int64)
        mc_token_ids = np.zeros((n_batch, 2), dtype=np.int64)
        lm_labels = np.full((n_batch, 2, input_len), fill_value=-100, dtype=np.int64)
        mc_labels = np.zeros((n_batch,), dtype=np.int64)
        for (
            i,
            (story, cont1, cont2, mc_label),
        ) in enumerate(dataset):
            with_cont1 = [start_token] + story[:cap_length] + [delimiter_token] + cont1[:cap_length] + [clf_token]
            with_cont2 = [start_token] + story[:cap_length] + [delimiter_token] + cont2[:cap_length] + [clf_token]
            input_ids[i, 0, : len(with_cont1)] = with_cont1
            input_ids[i, 1, : len(with_cont2)] = with_cont2
            mc_token_ids[i, 0] = len(with_cont1) - 1
            mc_token_ids[i, 1] = len(with_cont2) - 1
            lm_labels[i, 0, : len(with_cont1)] = with_cont1
            lm_labels[i, 1, : len(with_cont2)] = with_cont2
            mc_labels[i] = mc_label
        all_inputs = (input_ids, mc_token_ids, lm_labels, mc_labels)
        tensor_datasets.append(tuple(torch.tensor(t) for t in all_inputs))
    return tensor_datasets
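# Illustrative only (token ids are made up): with cap_length=3 and special ids
# S=40478, D=40479, C=40480, a story [1, 2] with continuations [3] and [4]
# yields the two rows [S, 1, 2, D, 3, C] and [S, 1, 2, D, 4, C], and
# mc_token_ids marks the final C position of each row.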
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_name', type=str, default='openai-gpt', help='pretrained model name')
    parser.add_argument('--do_train', action='store_true', help='Whether to run training.')
    parser.add_argument('--do_eval', action='store_true', help='Whether to run eval on the dev set.')
    parser.add_argument(
        '--output_dir',
        default=None,
        type=str,
        required=True,
        help='The output directory where the model predictions and checkpoints will be written.',
    )
    parser.add_argument('--train_dataset', type=str, default='')
    parser.add_argument('--eval_dataset', type=str, default='')
    parser.add_argument('--seed', type=int, default=42)
    parser.add_argument('--num_train_epochs', type=int, default=3)
    parser.add_argument('--train_batch_size', type=int, default=8)
    parser.add_argument('--eval_batch_size', type=int, default=16)
    parser.add_argument('--adam_epsilon', default=1e-8, type=float, help='Epsilon for Adam optimizer.')
    parser.add_argument('--max_grad_norm', type=int, default=1)
    parser.add_argument(
        '--max_steps',
        default=-1,
        type=int,
        help='If > 0: set total number of training steps to perform. Override num_train_epochs.',
    )
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help='Number of updates steps to accumulate before performing a backward/update pass.',
    )
    parser.add_argument('--learning_rate', type=float, default=6.25e-5)
    parser.add_argument('--warmup_steps', default=0, type=int, help='Linear warmup over warmup_steps.')
    parser.add_argument('--lr_schedule', type=str, default='warmup_linear')
    parser.add_argument('--weight_decay', type=float, default=0.01)
    parser.add_argument('--lm_coef', type=float, default=0.9)
    parser.add_argument('--n_valid', type=int, default=374)
    parser.add_argument('--server_ip', type=str, default='', help='Can be used for distant debugging.')
    parser.add_argument('--server_port', type=str, default='', help='Can be used for distant debugging.')
    args = parser.parse_args()
    print(args)

    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd

        print('Waiting for debugger attach')
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    n_gpu = torch.cuda.device_count()
    logger.info('device: {}, n_gpu {}'.format(device, n_gpu))

    if not args.do_train and not args.do_eval:
        raise ValueError('At least one of `do_train` or `do_eval` must be True.')

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    # Load tokenizer and model
    # This loading functions also add new tokens and embeddings called `special tokens`
    # These new embeddings will be fine-tuned on the RocStories dataset
    special_tokens = ['_start_', '_delimiter_', '_classify_']
    tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_name)
    tokenizer.add_tokens(special_tokens)
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
    model.resize_token_embeddings(len(tokenizer))
    model.to(device)

    # Load and encode the datasets
    def tokenize_and_encode(obj):
        """Tokenize and encode a nested object"""
        if isinstance(obj, str):
            return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(obj))
        elif isinstance(obj, int):
            return obj
        return [tokenize_and_encode(o) for o in obj]

    logger.info('Encoding dataset...')
    train_dataset = load_rocstories_dataset(args.train_dataset)
    eval_dataset = load_rocstories_dataset(args.eval_dataset)
    datasets = (train_dataset, eval_dataset)
    encoded_datasets = tokenize_and_encode(datasets)

    # Compute the max input length for the Transformer
    max_length = model.config.n_positions // 2 - 2
    input_length = max(
        len(story[:max_length]) + max(len(cont1[:max_length]), len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset
    )
    input_length = min(input_length, model.config.n_positions)  # Max size of input for the pre-trained model

    # Prepare inputs tensors and dataloaders
    tensor_datasets = pre_process_datasets(encoded_datasets, input_length, max_length, *special_tokens_ids)
    train_tensor_dataset, eval_tensor_dataset = tensor_datasets[0], tensor_datasets[1]

    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.train_batch_size)

    eval_data = TensorDataset(*eval_tensor_dataset)
    eval_sampler = SequentialSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)

    # Prepare optimizer
    if args.do_train:
        if args.max_steps > 0:
            t_total = args.max_steps
            args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1
        else:
            t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs

        param_optimizer = list(model.named_parameters())
        no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
                'weight_decay': args.weight_decay,
            },
            {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0},
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total
        )

    if args.do_train:
        nb_tr_steps, tr_loss, exp_average_loss = 0, 0, None
        model.train()
        for _ in trange(int(args.num_train_epochs), desc='Epoch'):
            tr_loss = 0
            nb_tr_steps = 0
            tqdm_bar = tqdm(train_dataloader, desc='Training')
            for step, batch in enumerate(tqdm_bar):
                batch = tuple(t.to(device) for t in batch)
                input_ids, mc_token_ids, lm_labels, mc_labels = batch
                losses = model(input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels)
                loss = args.lm_coef * losses[0] + losses[1]
                loss.backward()
                optimizer.step()
                scheduler.step()
                optimizer.zero_grad()
                tr_loss += loss.item()
                exp_average_loss = (
                    loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
                )
                nb_tr_steps += 1
                tqdm_bar.desc = 'Training loss: {:.2e} lr: {:.2e}'.format(exp_average_loss, scheduler.get_lr()[0])

    # Save a trained model
    if args.do_train:
        # Save a trained model, configuration and tokenizer
        model_to_save = model.module if hasattr(model, 'module') else model  # Only save the model itself

        # If we save using the predefined names, we can load using `from_pretrained`
        output_model_file = os.path.join(args.output_dir, WEIGHTS_NAME)
        output_config_file = os.path.join(args.output_dir, CONFIG_NAME)

        torch.save(model_to_save.state_dict(), output_model_file)
        model_to_save.config.to_json_file(output_config_file)
        tokenizer.save_vocabulary(args.output_dir)

        # Load a trained model and vocabulary that you have fine-tuned
        model = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
        model.to(device)

    if args.do_eval:
        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for batch in tqdm(eval_dataloader, desc='Evaluating'):
            batch = tuple(t.to(device) for t in batch)
            input_ids, mc_token_ids, lm_labels, mc_labels = batch
            with torch.no_grad():
                _, mc_loss, _, mc_logits = model(
                    input_ids, mc_token_ids=mc_token_ids, lm_labels=lm_labels, mc_labels=mc_labels
                )
            mc_logits = mc_logits.detach().cpu().numpy()
            mc_labels = mc_labels.to('cpu').numpy()
            tmp_eval_accuracy = accuracy(mc_logits, mc_labels)

            eval_loss += mc_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples
        train_loss = tr_loss / nb_tr_steps if args.do_train else None
        result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}

        output_eval_file = os.path.join(args.output_dir, 'eval_results.txt')
        with open(output_eval_file, 'w') as writer:
            logger.info('***** Eval results *****')
            for key in sorted(result.keys()):
                logger.info(' %s = %s', key, str(result[key]))
                writer.write('%s = %s\n' % (key, str(result[key])))
if __name__ == "__main__":
main()
| 464 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    r"""
    Construct a "fast" DPRContextEncoder tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    r"""
    Construct a "fast" DPRQuestionEncoder tokenizer (backed by HuggingFace's *tokenizers* library).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
DPRReaderOutput = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
CUSTOM_DPR_READER_DOCSTRING = R'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    r"""
    Construct a "fast" DPRReader tokenizer that combines a question with the titles and texts of the retrieved
    passages for the reader model.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
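# Illustrative only (not part of the original module): encoding one
# question/title/text triple for the reader; the checkpoint name is the public
# single-nq reader checkpoint and the strings are placeholders.
def _example_reader_encoding():
    tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
    return tokenizer(
        questions=["What is love ?"],
        titles=["Haddaway"],
        texts=["'What Is Love' is a song recorded by the artist Haddaway"],
        return_tensors="pt",
    )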
| 529 | 0 |
'''Project Euler Problem 15: count the lattice paths through an n x n grid.'''
from math import factorial
def solution(n: int = 20) -> int:
    '''Return the number of lattice paths through an n x n grid, i.e. C(2n, n).'''
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3, ...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
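# Sanity check (well-known Project Euler #15 value for the default 20x20 grid):
# solution(20) == 137846528820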
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
lowerCamelCase__ = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 411 |
'''Surface area and volume of a regular dodecahedron.'''
def dodecahedron_surface_area(edge: float) -> float:
    '''Return the surface area of a regular dodecahedron with the given edge length.'''
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError('Length must be positive.')
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    '''Return the volume of a regular dodecahedron with the given edge length.'''
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError('Length must be positive.')
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
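# Quick numeric checks for a unit edge (values rounded):
#   dodecahedron_surface_area(1) ~ 20.6457
#   dodecahedron_volume(1) ~ 7.6631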
if __name__ == "__main__":
import doctest
doctest.testmod()
| 411 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 33 |
'''simple docstring'''
__author__ = 'Alexander Joslin'
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ')':
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
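# Worked example: for '(5 + ((4 * 2) * (2 + 3)))' the innermost closing
# parentheses reduce first (4 * 2 -> 8, then 2 + 3 -> 5), the next one
# combines them (8 * 5 -> 40), and the outermost gives 5 + 40 -> 45,
# which RULE 5 finally returns.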
if __name__ == "__main__":
    equation = '(5 + ((4 * 2) * (2 + 3)))'
# answer = 45
print(f'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
| 596 | 0 |
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version(">=", FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
logger = get_logger(__name__)
def save_fsdp_model(fsdp_plugin, accelerator, model, output_dir, model_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        state_dict = model.state_dict()
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            output_model_file = os.path.join(output_dir, weights_name)
            if accelerator.process_index == 0:
                logger.info(f"Saving model to {output_model_file}")
                torch.save(state_dict, output_model_file)
                logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            output_model_file = os.path.join(output_dir, weights_name)
            logger.info(f"Saving model to {output_model_file}")
            torch.save(state_dict, output_model_file)
            logger.info(f"Model saved to {output_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = os.path.join(output_dir, f"{MODEL_NAME}_{model_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving model to {ckpt_dir}")
            state_dict = {"model": state_dict}
            dist_cp.save_state_dict(
                state_dict=state_dict,
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Model saved to {ckpt_dir}")


def load_fsdp_model(fsdp_plugin, accelerator, model, input_dir, model_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if type(model) != FSDP and accelerator.process_index != 0:
                if not fsdp_plugin.sync_module_states:
                    raise ValueError(
                        "Set the `sync_module_states` flag to `True` so that model states are synced across processes when "
                        "initializing FSDP object"
                    )
                return
            weights_name = f"{MODEL_NAME}.bin" if model_index == 0 else f"{MODEL_NAME}_{model_index}.bin"
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
            weights_name = (
                f"{MODEL_NAME}_rank{accelerator.process_index}.bin"
                if model_index == 0
                else f"{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin"
            )
            input_model_file = os.path.join(input_dir, weights_name)
            logger.info(f"Loading model from {input_model_file}")
            state_dict = torch.load(input_model_file)
            logger.info(f"Model loaded from {input_model_file}")
        elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
            ckpt_dir = (
                os.path.join(input_dir, f"{MODEL_NAME}_{model_index}")
                if f"{MODEL_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading model from {ckpt_dir}")
            state_dict = {"model": model.state_dict()}
            dist_cp.load_state_dict(
                state_dict=state_dict,
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
                planner=DefaultLoadPlanner(),
            )
            state_dict = state_dict["model"]
            logger.info(f"Model loaded from {ckpt_dir}")
        model.load_state_dict(state_dict)


def save_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, output_dir, optimizer_index=0):
    os.makedirs(output_dir, exist_ok=True)
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        optim_state = FSDP.optim_state_dict(model, optimizer)
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            if accelerator.process_index == 0:
                optim_state_name = (
                    f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
                )
                output_optimizer_file = os.path.join(output_dir, optim_state_name)
                logger.info(f"Saving Optimizer state to {output_optimizer_file}")
                torch.save(optim_state, output_optimizer_file)
                logger.info(f"Optimizer state saved in {output_optimizer_file}")
        else:
            ckpt_dir = os.path.join(output_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
            os.makedirs(ckpt_dir, exist_ok=True)
            logger.info(f"Saving Optimizer state to {ckpt_dir}")
            dist_cp.save_state_dict(
                state_dict={"optimizer": optim_state},
                storage_writer=dist_cp.FileSystemWriter(ckpt_dir),
                planner=DefaultSavePlanner(),
            )
            logger.info(f"Optimizer state saved in {ckpt_dir}")


def load_fsdp_optimizer(fsdp_plugin, accelerator, optimizer, model, input_dir, optimizer_index=0):
    accelerator.wait_for_everyone()
    with FSDP.state_dict_type(
        model, fsdp_plugin.state_dict_type, fsdp_plugin.state_dict_config, fsdp_plugin.optim_state_dict_config
    ):
        if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
            optim_state = None
            # below check should work but currently it isn't working (mostly a pytorch issue),
            # in the meantime disabling it at the cost of excess memory usage
            # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
            optimizer_name = (
                f"{OPTIMIZER_NAME}.bin" if optimizer_index == 0 else f"{OPTIMIZER_NAME}_{optimizer_index}.bin"
            )
            input_optimizer_file = os.path.join(input_dir, optimizer_name)
            logger.info(f"Loading Optimizer state from {input_optimizer_file}")
            optim_state = torch.load(input_optimizer_file)
            logger.info(f"Optimizer state loaded from {input_optimizer_file}")
        else:
            ckpt_dir = (
                os.path.join(input_dir, f"{OPTIMIZER_NAME}_{optimizer_index}")
                if f"{OPTIMIZER_NAME}" not in input_dir
                else input_dir
            )
            logger.info(f"Loading Optimizer from {ckpt_dir}")
            optim_state = load_sharded_optimizer_state_dict(
                model_state_dict=model.state_dict(),
                optimizer_key="optimizer",
                storage_reader=dist_cp.FileSystemReader(ckpt_dir),
            )
            optim_state = optim_state["optimizer"]
            logger.info(f"Optimizer loaded from {ckpt_dir}")
        flattened_osd = FSDP.optim_state_dict_to_load(optim_state, model, optimizer)
        optimizer.load_state_dict(flattened_osd)
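# Editorial note: the three StateDictType branches above correspond to three
# on-disk layouts: FULL_STATE_DICT (one consolidated "<MODEL_NAME>.bin" written
# by rank 0), LOCAL_STATE_DICT (one "<MODEL_NAME>_rank{i}.bin" per process),
# and SHARDED_STATE_DICT (a torch.distributed.checkpoint directory that can be
# re-loaded under a different world size).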
| 232 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(
    checkpoint_path: str,
    output_path: str,
):
    # Only supports the v1 Stable Diffusion VAE config.
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
UpperCAmelCase_ : Tuple = argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
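# A minimal usage sketch (paths are illustrative, not from the original script): once
# converted, the dump folder is a standard diffusers checkpoint and can be reloaded
# with the library's usual API:
#
#   vae_pt_to_vae_diffuser("vae.safetensors", "./converted_vae")
#   vae = AutoencoderKL.from_pretrained("./converted_vae")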
| 232 | 1 |
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch(openai_checkpoint_folder_path, openai_config_file, pytorch_dump_folder_path):
    # Construct the model config, either from defaults or from a user-supplied JSON file.
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file)
    model = OpenAIGPTModel(config)

    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model, config, openai_checkpoint_folder_path)

    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(model.state_dict(), pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--openai_checkpoint_folder_path",
default=None,
type=str,
required=True,
help="Path to the TensorFlow checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--openai_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained OpenAI model. \n"
"This specifies the model architecture."
),
)
    args = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
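# Hedged usage sketch: the dump folder written above follows the standard
# transformers layout (WEIGHTS_NAME + CONFIG_NAME), so it can be reloaded
# directly; the folder name here is an example:
#
#   model = OpenAIGPTModel.from_pretrained("./openai_gpt_pytorch")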
| 637 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mctct"] = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
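# A minimal sketch of the lazy-import idea used above (simplified; the real
# transformers _LazyModule also handles __dir__, pickling, and error reporting):
import importlib
import types


class MiniLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the submodule that defines it.
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        # Import the defining submodule only on first attribute access.
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)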
| 632 | 0 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class TFBlenderbotSmallModelTester:
    config_cls = BlenderbotSmallConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
        inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFBlenderbotSmallModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
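# Minimal illustration (values made up) of the mask construction above: positions
# equal to pad_token_id get mask 0, everything else gets 1.
#
#   ids = tf.constant([[5, 7, 0, 0]])
#   mask = tf.cast(tf.math.not_equal(ids, 0), tf.int8)  # -> [[1, 1, 0, 0]]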
@require_tf
class TFBlenderbotSmallModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
    )
    all_generative_model_classes = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFBlenderbotSmallForConditionalGeneration,
            "feature-extraction": TFBlenderbotSmallModel,
            "summarization": TFBlenderbotSmallForConditionalGeneration,
            "text2text-generation": TFBlenderbotSmallForConditionalGeneration,
            "translation": TFBlenderbotSmallForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFBlenderbotSmallModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlenderbotSmallConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_tokenizers
@require_tf
class TFBlenderbotSmall90MIntegrationTests(unittest.TestCase):
    src_text = [
        "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like "
        " i'm going to throw up.\nand why is that?"
    ]
    model_name = "facebook/blenderbot_small-90M"

    @cached_property
    def tokenizer(self):
        # use the "old" tokenizer here because of a bug when downloading the new one
        return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model
    @slow
    def test_90_generation_from_short_input(self):
        model_inputs = self.tokenizer(self.src_text, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 721 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing the experiment tracking capability,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding="longest", max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors="pt", )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE)
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize Accelerator

    # New Code #
    # We pass in "all" to `log_with` to grab all available trackers in the environment
    # Note: If using a custom `Tracker` class, should be passed in here such as:
    # >>> log_with = ["all", MyCustomTrackerClassInstance()]
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir)
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    set_seed(seed)

    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    metric = evaluate.load("glue", "mrpc")

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # New Code #
    # We need to initialize the trackers we use. Overall configurations can also be stored
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        # New Code #
        # For our tracking example, we will log the total loss of each epoch
        if args.with_tracking:
            total_loss = 0
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            # New Code #
            if args.with_tracking:
                total_loss += loss.detach().float()
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True` (the default).
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions, references=references, )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}:", eval_metric)

        # New Code #
        # To actually log, we call `Accelerator.log`
        # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int`
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": eval_metric["accuracy"],
                    "f1": eval_metric["f1"],
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                }, step=epoch, )

    # New Code #
    # When a run is finished, you should call `accelerator.end_training()`
    # to close all of the open trackers
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision", type=str, default=None, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.", )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--with_tracking", action="store_true", help="Whether to load in all available experiment trackers from the environment and use them for logging.", )
    parser.add_argument(
        "--project_dir", type=str, default="logs", help="Location on where to store experiment tracking logs and relevant project information", )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
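# Hypothetical invocations (flag names come from the parser above; the script
# name and values are examples only):
#
#   python tracking_example.py --with_tracking --project_dir ./logs
#   accelerate launch tracking_example.py --mixed_precision fp16 --with_tracking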
| 451 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''microsoft/biogpt''': '''https://huggingface.co/microsoft/biogpt/resolve/main/config.json''',
# See all BioGPT models at https://huggingface.co/models?filter=biogpt
}
class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(self, vocab_size=42384, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16, intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=1024, initializer_range=0.02, layer_norm_eps=1e-12, scale_embedding=True, use_cache=True, layerdrop=0.0, activation_dropout=0.0, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
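# Usage sketch: the defaults above match the released base checkpoint, and any
# field can be overridden at construction time, e.g. a hypothetical smaller variant:
#
#   config = BioGptConfig(num_hidden_layers=6, hidden_size=512)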
| 497 |
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp_pos = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp_pos

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
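# A small worked example (weights chosen arbitrarily): for the triangle
# 0-1 (w=1), 1-2 (w=2), 0-2 (w=3) the two cheapest edges form the MST, so
#   prisms_algorithm({0: [[1, 1], [2, 3]], 1: [[0, 1], [2, 2]], 2: [[0, 3], [1, 2]]})
# is expected to return [(0, 1), (1, 2)].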
| 497 | 1 |
from __future__ import annotations
def is_9_pandigital(n: int) -> bool:
    """Return True if n contains each of the digits 1-9 exactly once."""
    candidate_str = str(n)
    return len(candidate_str) == 9 and set(candidate_str) == set("123456789")


def solution() -> int | None:
    """Project Euler 38: largest 1-9 pandigital concatenated product."""
    for base_num in range(9999, 4999, -1):
        candidate = 100002 * base_num
        if is_9_pandigital(candidate):
            return candidate
    for base_num in range(333, 99, -1):
        candidate = 1002003 * base_num
        if is_9_pandigital(candidate):
            return candidate
    return None
if __name__ == "__main__":
print(f'{solution() = }')
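# Worked check of the multiplier trick above: for a 4-digit n, 100002 * n is the
# concatenation of n and 2 * n, e.g. 100002 * 9327 == 932718654 ("9327" + "18654"),
# and 932718654 uses each of the digits 1-9 exactly once, so is_9_pandigital accepts it.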
| 701 |
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")
@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    return ".".join(str(v) for v in version_tuple)
| 437 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/mobilenet_v1_1.0_224''': '''https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v1_0.75_192''': '''https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json''',
# See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    model_type = "mobilenet_v1"

    def __init__(self, num_channels=3, image_size=224, depth_multiplier=1.0, min_depth=8, hidden_act="relu6", tf_padding=True, classifier_dropout_prob=0.999, initializer_range=0.02, layer_norm_eps=0.001, **kwargs, ) -> None:
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps


class MobileNetV1OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
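# Usage sketch: the checkpoint names in the archive map above encode the two main
# hyperparameters, e.g. "0.75_192" -> depth_multiplier=0.75, image_size=192:
#
#   config = MobileNetV1Config(depth_multiplier=0.75, image_size=192)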
| 14 |
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")["module"]

    # Load the entity vocab file
    entity_vocab = load_original_entity_vocab(entity_vocab_path)
    # add an entry for [MASK2]
    entity_vocab["[MASK2]"] = max(entity_vocab.values()) + 1
    config.entity_vocab_size += 1

    tokenizer = XLMRobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "r") as f:
        tokenizer_config = json.load(f)
    tokenizer_config["tokenizer_class"] = "MLukeTokenizer"
    with open(os.path.join(pytorch_dump_folder_path, "tokenizer_config.json"), "w") as f:
        json.dump(tokenizer_config, f)

    with open(os.path.join(pytorch_dump_folder_path, MLukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    ent_init_index = tokenizer.convert_tokens_to_ids(["@"])[0]
    ent2_init_index = tokenizer.convert_tokens_to_ids(["#"])[0]

    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[ent_init_index].unsqueeze(0)
    ent2_emb = word_emb[ent2_init_index].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])
    # add special tokens for 'entity_predictions.bias'
    for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
        decoder_bias = state_dict[bias_name]
        ent_decoder_bias = decoder_bias[ent_init_index].unsqueeze(0)
        ent2_decoder_bias = decoder_bias[ent2_init_index].unsqueeze(0)
        state_dict[bias_name] = torch.cat([decoder_bias, ent_decoder_bias, ent2_decoder_bias])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_mask_emb = entity_emb[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_embeddings.entity_embeddings.weight"] = torch.cat([entity_emb, entity_mask_emb])
    # add [MASK2] for 'entity_predictions.bias'
    entity_prediction_bias = state_dict["entity_predictions.bias"]
    entity_mask_bias = entity_prediction_bias[entity_vocab["[MASK]"]].unsqueeze(0)
    state_dict["entity_predictions.bias"] = torch.cat([entity_prediction_bias, entity_mask_bias])

    model = LukeForMaskedLM(config=config).eval()

    state_dict.pop("entity_predictions.decoder.weight")
    state_dict.pop("lm_head.decoder.weight")
    state_dict.pop("lm_head.decoder.bias")
    state_dict_for_hugging_face = OrderedDict()
    for key, value in state_dict.items():
        if not (key.startswith("lm_head") or key.startswith("entity_predictions")):
            state_dict_for_hugging_face[f"luke.{key}"] = state_dict[key]
        else:
            state_dict_for_hugging_face[key] = state_dict[key]

    missing_keys, unexpected_keys = model.load_state_dict(state_dict_for_hugging_face, strict=False)

    if set(unexpected_keys) != {"luke.embeddings.position_ids"}:
        raise ValueError(f"Unexpected unexpected_keys: {unexpected_keys}")
    if set(missing_keys) != {
        "lm_head.decoder.weight",
        "lm_head.decoder.bias",
        "entity_predictions.decoder.weight",
    }:
        raise ValueError(f"Unexpected missing_keys: {missing_keys}")

    model.tie_weights()
    assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
    assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()

    # Check outputs
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = "ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan)."
    span = (0, 9)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 33, 768))
        expected_slice = torch.tensor([[0.0892, 0.0596, -0.2819], [0.0134, 0.1199, 0.0573], [-0.0169, 0.0927, 0.0644]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}")
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        raise NotImplementedError
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[-0.1482, 0.0609, 0.0322]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}")
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify masked word/entity prediction
    tokenizer = MLukeTokenizer.from_pretrained(pytorch_dump_folder_path)
    text = "Tokyo is the capital of <mask>."
    span = (24, 30)
    encoding = tokenizer(text, entity_spans=[span], return_tensors="pt")

    outputs = model(**encoding)

    input_ids = encoding["input_ids"][0].tolist()
    mask_position_id = input_ids.index(tokenizer.convert_tokens_to_ids("<mask>"))
    predicted_id = outputs.logits[0][mask_position_id].argmax(dim=-1)
    assert "Japan" == tokenizer.decode(predicted_id)

    predicted_entity_id = outputs.entity_logits[0][0].argmax().item()
    multilingual_predicted_entities = [
        entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
    ]
    assert [e for e in multilingual_predicted_entities if e.startswith("en:")][0] == "en:Japan"

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_original_entity_vocab(entity_vocab_path):
    SPECIAL_TOKENS = ["[MASK]", "[PAD]", "[UNK]"]

    data = [json.loads(line) for line in open(entity_vocab_path)]

    new_mapping = {}
    for entry in data:
        entity_id = entry["id"]
        for entity_name, language in entry["entities"]:
            if entity_name in SPECIAL_TOKENS:
                new_mapping[entity_name] = entity_id
                break
            new_mapping[f"{language}:{entity_name}"] = entity_id
    return new_mapping
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--checkpoint_path''', type=str, help='''Path to a pytorch_model.bin file.''')
parser.add_argument(
'''--metadata_path''', default=None, type=str, help='''Path to a metadata.json file, defining the configuration.'''
)
parser.add_argument(
'''--entity_vocab_path''',
default=None,
type=str,
help='''Path to an entity_vocab.tsv file, containing the entity vocabulary.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to where to dump the output PyTorch model.'''
)
parser.add_argument(
'''--model_size''', default='''base''', type=str, choices=['''base''', '''large'''], help='''Size of the model to be converted.'''
)
    args = parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
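# The entity vocab consumed by load_original_entity_vocab is JSON-lines; a
# hypothetical entry (illustrative only) looks like
#   {"id": 3, "entities": [["Japan", "en"], ["日本", "ja"]]}
# which the parser above maps to {"en:Japan": 3, "ja:日本": 3}.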
| 14 | 1 |
"""simple docstring"""
from collections import defaultdict
def dfs(start: int) -> int:
    """Return the size of the subtree rooted at start; record even-sized subtrees."""
    ret = 1
    visited[start] = True
    for v in tree[start]:
        if v not in visited:
            ret += dfs(v)
    if ret % 2 == 0:
        cuts.append(start)
    return ret


def even_tree():
    """Run the DFS from the root (node 1)."""
    dfs(1)
if __name__ == "__main__":
    number_of_nodes, number_of_edges = 10, 9
    tree = defaultdict(list)
    visited: dict[int, bool] = {}
    cuts: list[int] = []
    count = 0
    edges = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
even_tree()
print(len(cuts) - 1)
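# For the sample tree above, the even-sized subtrees are rooted at 3 (size 2),
# 6 (size 4) and 1 (the whole tree, size 10); excluding the root itself,
# len(cuts) - 1 == 2 edges can be removed.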
| 263 |
"""simple docstring"""
import random
from typing import Any
def fisher_yates_shuffle(data: list) -> list[Any]:
    for _ in range(len(data)):
        a = random.randint(0, len(data) - 1)
        b = random.randint(0, len(data) - 1)
        data[a], data[b] = data[b], data[a]
    return data
if __name__ == "__main__":
    integers = [0, 1, 2, 3, 4, 5, 6, 7]
    strings = ["python", "says", "hello", "!"]
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
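# Note: the variant above swaps two uniformly random positions per step, which is
# not the classic Fisher-Yates walk. A sketch of the unbiased textbook form:
def classic_fisher_yates(data: list) -> list:
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)
        data[i], data[j] = data[j], data[i]
    return data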
| 263 | 1 |