"""simple docstring"""
import sacrebleu as scb
from packaging import version
from sacrebleu import TER
import datasets
_CITATION = '\\n@inproceedings{snover-etal-2006-study,\n    title = "A Study of Translation Edit Rate with Targeted Human Annotation",\n    author = "Snover, Matthew and\n      Dorr, Bonnie and\n      Schwartz, Rich and\n      Micciulla, Linnea and\n      Makhoul, John",\n    booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",\n    month = aug # " 8-12",\n    year = "2006",\n    address = "Cambridge, Massachusetts, USA",\n    publisher = "Association for Machine Translation in the Americas",\n    url = "https://aclanthology.org/2006.amta-papers.25",\n    pages = "223--231",\n}\n@inproceedings{post-2018-call,\n    title = "A Call for Clarity in Reporting {BLEU} Scores",\n    author = "Post, Matt",\n    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n    month = oct,\n    year = "2018",\n    address = "Belgium, Brussels",\n    publisher = "Association for Computational Linguistics",\n    url = "https://www.aclweb.org/anthology/W18-6319",\n    pages = "186--191",\n}\n'
_DESCRIPTION = '\\nTER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a\nhypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu\n(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found\nhere: https://github.com/jhclark/tercom.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists needs to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces TER scores alongside the number of edits and reference length.\n\nArgs:\n    predictions (list of str): The system stream (a sequence of segments).\n    references (list of list of str): A list of one or more reference streams (each a sequence of segments).\n    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.\n    ignore_punct (boolean): If `True`, removes punctuation before scoring. Defaults to `False`.\n    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,\n        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.\n        Only applies if `normalized = True`. Defaults to `False`.\n    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.\n\nReturns:\n    \'score\' (float): TER score (num_edits / sum_ref_lengths * 100)\n    \'num_edits\' (int): The cumulative number of edits\n    \'ref_length\' (float): The cumulative average reference length\n\nExamples:\n    Example 1:\n        >>> predictions = ["does this sentence match??",\n        ...                    "what about this sentence?",\n        ...                    "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0}\n\n    Example 2:\n        >>> predictions = ["does this sentence match??",\n        ...                    "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0}\n\n    Example 3:\n        >>> predictions = ["does this sentence match??",\n        ...                    "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         normalized=True,\n        ...                         case_sensitive=True)\n        >>> print(results)\n        {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5}\n\n    Example 4:\n        >>> predictions = ["does this sentence match??",\n        ...                    "what about this sentence?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0}\n\n    Example 5:\n        >>> predictions = ["does this sentence match??",\n        ...                    "what about this sentence?",\n        ...                    "What did the TER metric user say to the developer?"]\n        >>> references = [["does this sentence match", "does this sentence match!?!"],\n        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],\n        ...             ["Your jokes are...", "...TERrible"]]\n        >>> ter = datasets.load_metric("ter")\n        >>> results = ter.compute(predictions=predictions,\n        ...                         references=references,\n        ...                         ignore_punct=True,\n        ...                         case_sensitive=False)\n        >>> print(results)\n        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Ter(datasets.Metric):
    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )
    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
"""simple docstring"""
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])

    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1

    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70_000)
    print("theta: ", theta)  # printing theta, i.e. our weights vector

    def predict_prob(x):
        # predicting the probability from the fitted logistic regression model
        return sigmoid_function(np.dot(x, theta))
    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
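
    # Optional sanity check (a hedged sketch, not part of the original script):
    # sklearn's solver adds L2 regularization, so its coefficients will only
    # roughly agree with the plain gradient-descent `theta` above.
    # from sklearn.linear_model import LogisticRegression
    # clf = LogisticRegression(C=1e5, max_iter=10_000).fit(x, y)
    # print("sklearn coef:", clf.coef_, "intercept:", clf.intercept_)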
'''simple docstring'''
import unittest
import numpy as np
from transformers import AlbertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.albert.modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
)
class FlaxAlbertModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = AlbertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxAlbertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxAlbertModel,
            FlaxAlbertForPreTraining,
            FlaxAlbertForMaskedLM,
            FlaxAlbertForMultipleChoice,
            FlaxAlbertForQuestionAnswering,
            FlaxAlbertForSequenceClassification,
            FlaxAlbertForTokenClassification,
            FlaxAlbertForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxAlbertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("albert-base-v2")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)


@require_flax
class FlaxAlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaxAlbertModel.from_pretrained("albert-base-v2")
        input_ids = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = (1, 11, 768)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = np.array(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )

        self.assertTrue(jnp.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
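

# Running these tests (a sketch; the path assumes the usual transformers repo layout):
#   RUN_SLOW=1 pytest tests/models/albert/test_modeling_flax_albert.py -k "inference or from_pretrained"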
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_deta_config(model_name):
    backbone_config = SwinConfig(
        embed_dim=192,
        depths=(2, 2, 18, 2),
        num_heads=(6, 12, 24, 48),
        window_size=12,
        out_features=["stage2", "stage3", "stage4"],
    )

    config = DetaConfig(
        backbone_config=backbone_config,
        num_queries=900,
        encoder_ffn_dim=2048,
        decoder_ffn_dim=2048,
        num_feature_levels=5,
        assign_first_stage=True,
        with_box_refine=True,
        two_stage=True,
    )

    # set labels
    repo_id = "huggingface/label-files"
    if "o365" in model_name:
        num_labels = 366
        filename = "object365-id2label.json"
    else:
        num_labels = 91
        filename = "coco-detection-id2label.json"

    config.num_labels = num_labels
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def create_rename_keys(config):
    rename_keys = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.patch_embed.proj.weight''', '''model.backbone.model.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.proj.bias''', '''model.backbone.model.embeddings.patch_embeddings.projection.bias''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.weight''', '''model.backbone.model.embeddings.norm.weight''') )
rename_keys.append(('''backbone.0.body.patch_embed.norm.bias''', '''model.backbone.model.embeddings.norm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.norm2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias''', f'''model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias''') )
if i < 3:
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.reduction.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.reduction.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.weight''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.weight''') )
rename_keys.append((f'''backbone.0.body.layers.{i}.downsample.norm.bias''', f'''model.backbone.model.encoder.layers.{i}.downsample.norm.bias''') )
rename_keys.append(('''backbone.0.body.norm1.weight''', '''model.backbone.model.hidden_states_norms.stage2.weight''') )
rename_keys.append(('''backbone.0.body.norm1.bias''', '''model.backbone.model.hidden_states_norms.stage2.bias''') )
rename_keys.append(('''backbone.0.body.norm2.weight''', '''model.backbone.model.hidden_states_norms.stage3.weight''') )
rename_keys.append(('''backbone.0.body.norm2.bias''', '''model.backbone.model.hidden_states_norms.stage3.bias''') )
rename_keys.append(('''backbone.0.body.norm3.weight''', '''model.backbone.model.hidden_states_norms.stage4.weight''') )
rename_keys.append(('''backbone.0.body.norm3.bias''', '''model.backbone.model.hidden_states_norms.stage4.bias''') )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias''', f'''model.encoder.layers.{i}.self_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.weight''', f'''model.encoder.layers.{i}.self_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.attention_weights.bias''', f'''model.encoder.layers.{i}.self_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.weight''', f'''model.encoder.layers.{i}.self_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.value_proj.bias''', f'''model.encoder.layers.{i}.self_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.weight''', f'''model.encoder.layers.{i}.self_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.self_attn.output_proj.bias''', f'''model.encoder.layers.{i}.self_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.weight''', f'''model.encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm1.bias''', f'''model.encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''model.encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''model.encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''model.encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''model.encoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.weight''', f'''model.encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''model.encoder.layers.{i}.final_layer_norm.bias''') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias''', f'''model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.weight''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.attention_weights.bias''', f'''model.decoder.layers.{i}.encoder_attn.attention_weights.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.value_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.value_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.weight''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.cross_attn.output_proj.bias''', f'''model.decoder.layers.{i}.encoder_attn.output_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.weight''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm1.bias''', f'''model.decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', f'''model.decoder.layers.{i}.self_attn.out_proj.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''model.decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.weight''', f'''model.decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm2.bias''', f'''model.decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''model.decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''model.decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''model.decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''model.decoder.layers.{i}.fc2.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.weight''', f'''model.decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''model.decoder.layers.{i}.final_layer_norm.bias''') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:hidden_size, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:hidden_size]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-hidden_size:, :]
        state_dict[f"model.decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-hidden_size:]
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
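

# Note: this is the standard COCO val2017 image (two cats on a couch) that
# HuggingFace conversion scripts conventionally use as a smoke-test input.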
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    """Copy/paste/tweak the original model's weights to our DETA structure."""
    config = get_deta_config(model_name)

    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f"Model name {model_name} not supported")

    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    # original state dict
    for name, param in state_dict.items():
        print(name, param.shape)

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)

    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val

    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)

    # load image processor
    processor = DetaImageProcessor(format="coco_detection")

    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))

    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]]
        )
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]]
        )
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")

    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f"Saving PyTorch model and processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)

    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f"jozhang97/{model_name}")
        processor.push_to_hub(f"jozhang97/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
type=str,
default='deta-swin-large',
choices=['deta-swin-large', 'deta-swin-large-o365'],
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default=None,
type=str,
help='Path to the folder to output PyTorch model.',
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
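
# Example invocation (a sketch; the script filename is an assumption):
#   python convert_deta_swin_to_pytorch.py \
#       --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large \
#       --push_to_hub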
import math
def jump_search(arr, x):
    """Search a sorted list `arr` for `x` in O(sqrt(n)) by jumping in sqrt(n)-sized blocks."""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1

    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)

    if res == -1:
        print("Number not found!")
    else:
        print(f"Number {x} is at index {res}")
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def jax_cosine_distance(emb_1, emb_2, eps=1e-12):
    # normalize both embedding batches row-wise, then take the pairwise dot products
    norm_emb_1 = jnp.divide(emb_1.T, jnp.clip(jnp.linalg.norm(emb_1, axis=1), a_min=eps)).T
    norm_emb_2 = jnp.divide(emb_2.T, jnp.clip(jnp.linalg.norm(emb_2, axis=1), a_min=eps)).T
    return jnp.matmul(norm_emb_1, norm_emb_2.T)
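

# Example (a minimal sketch): identical unit vectors give cosine similarity ~1.0.
#   a = jnp.array([[1.0, 0.0]])
#   jax_cosine_distance(a, a)  # -> [[1.]]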
class FlaxStableDiffusionSafetyCheckerModule(nn.Module):
    config: CLIPConfig
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.vision_model = FlaxCLIPVisionModule(self.config.vision_config)
        self.visual_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)

        self.concept_embeds = self.param("concept_embeds", jax.nn.initializers.ones, (17, self.config.projection_dim))
        self.special_care_embeds = self.param(
            "special_care_embeds", jax.nn.initializers.ones, (3, self.config.projection_dim)
        )

        self.concept_embeds_weights = self.param("concept_embeds_weights", jax.nn.initializers.ones, (17,))
        self.special_care_embeds_weights = self.param("special_care_embeds_weights", jax.nn.initializers.ones, (3,))

    def __call__(self, clip_input):
        pooled_output = self.vision_model(clip_input)[1]
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = jax_cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = jax_cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign image inputs
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
        special_scores = jnp.round(special_scores, 3)
        is_special_care = jnp.any(special_scores > 0, axis=1, keepdims=True)
        # Use a lower threshold if an image has any special care concept
        special_adjustment = is_special_care * 0.01

        concept_scores = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
        concept_scores = jnp.round(concept_scores, 3)
        has_nsfw_concepts = jnp.any(concept_scores > 0, axis=1)

        return has_nsfw_concepts
class FlaxStableDiffusionSafetyChecker(FlaxPreTrainedModel):
    config_class = CLIPConfig
    main_input_name = "clip_input"
    module_class = FlaxStableDiffusionSafetyCheckerModule

    def __init__(
        self,
        config: CLIPConfig,
        input_shape: Optional[Tuple] = None,
        seed: int = 0,
        dtype: jnp.dtype = jnp.float32,
        _do_init: bool = True,
        **kwargs,
    ):
        if input_shape is None:
            input_shape = (1, 224, 224, 3)
        module = self.module_class(config=config, dtype=dtype, **kwargs)
        super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)

    def init_weights(self, rng, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
        # init input tensor
        clip_input = jax.random.normal(rng, input_shape)

        params_rng, dropout_rng = jax.random.split(rng)
        rngs = {"params": params_rng, "dropout": dropout_rng}

        random_params = self.module.init(rngs, clip_input)["params"]

        return random_params

    def __call__(self, clip_input, params: dict = None):
        clip_input = jnp.transpose(clip_input, (0, 2, 3, 1))

        return self.module.apply(
            {"params": params or self.params},
            jnp.array(clip_input, dtype=jnp.float32),
            rngs={},
        )
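

# Usage note (a hedged sketch): in the Flax Stable Diffusion pipeline this checker
# is typically loaded from the pretrained checkpoint, roughly:
#   checker = FlaxStableDiffusionSafetyChecker.from_pretrained(
#       "CompVis/stable-diffusion-safety-checker", from_pt=True)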
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class GLPNImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size_divisor: int = 32,
        resample=PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        **kwargs,
    ) -> None:
        self.do_resize = do_resize
        self.do_rescale = do_rescale
        self.size_divisor = size_divisor
        self.resample = resample
        super().__init__(**kwargs)

    def resize(
        self,
        image: np.ndarray,
        size_divisor: int,
        resample,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        height, width = get_image_size(image)
        # Rounds the height and width down to the closest multiple of size_divisor
        new_h = height // size_divisor * size_divisor
        new_w = width // size_divisor * size_divisor
        image = resize(image, (new_h, new_w), resample=resample, data_format=data_format, **kwargs)
        return image

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[ChannelDimension] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image=image, scale=scale, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images,
        do_resize: Optional[bool] = None,
        do_rescale: Optional[bool] = None,
        size_divisor: Optional[int] = None,
        resample=None,
        return_tensors: Optional[Union[TensorType, str]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        size_divisor = size_divisor if size_divisor is not None else self.size_divisor
        resample = resample if resample is not None else self.resample

        if do_resize and size_divisor is None:
            raise ValueError("size_divisor is required for resizing")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError("Invalid image(s)")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(img) for img in images]

        if do_resize:
            images = [self.resize(image, size_divisor=size_divisor, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image, scale=1 / 255) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
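

# Usage sketch (hedged; mirrors the GLPN processor API this file follows):
#   from PIL import Image
#   processor = GLPNImageProcessor(size_divisor=32)
#   inputs = processor(images=Image.new("RGB", (65, 47)), return_tensors="np")
#   # height/width are rounded down to multiples of 32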
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'''
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'''
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-ctx_encoder-single-nq-base''': 512,
'''facebook/dpr-ctx_encoder-multiset-base''': 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-question_encoder-single-nq-base''': 512,
'''facebook/dpr-question_encoder-multiset-base''': 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/dpr-reader-single-nq-base''': 512,
'''facebook/dpr-reader-multiset-base''': 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-ctx_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-ctx_encoder-multiset-base''': {'''do_lower_case''': True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-question_encoder-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-question_encoder-multiset-base''': {'''do_lower_case''': True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
'''facebook/dpr-reader-single-nq-base''': {'''do_lower_case''': True},
'''facebook/dpr-reader-multiset-base''': {'''do_lower_case''': True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer


class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r'''
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `\'tf\'`: Return TensorFlow `tf.constant` objects.
- `\'pt\'`: Return PyTorch `torch.Tensor` objects.
- `\'np\'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer\'s default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Return:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
'''
@add_start_docstrings(a_ )
class _UpperCamelCase :
def __call__( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = False , __UpperCamelCase = False , __UpperCamelCase = None , __UpperCamelCase = None , __UpperCamelCase = None , **__UpperCamelCase , )-> BatchEncoding:
if titles is None and texts is None:
return super().__call__(
__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
elif titles is None or texts is None:
__lowerCAmelCase = titles if texts is None else texts
return super().__call__(
__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase , return_attention_mask=__UpperCamelCase , **__UpperCamelCase , )
__lowerCAmelCase = titles if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [titles]
__lowerCAmelCase = texts if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [texts]
__lowerCAmelCase = len(__UpperCamelCase )
__lowerCAmelCase = questions if not isinstance(__UpperCamelCase , __UpperCamelCase ) else [questions] * n_passages
assert len(__UpperCamelCase ) == len(
__UpperCamelCase ), F"""There should be as many titles than texts but got {len(__UpperCamelCase )} titles and {len(__UpperCamelCase )} texts."""
__lowerCAmelCase = super().__call__(__UpperCamelCase , __UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )["input_ids"]
__lowerCAmelCase = super().__call__(__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase )["input_ids"]
__lowerCAmelCase = {
"input_ids": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(__UpperCamelCase , __UpperCamelCase )
]
}
if return_attention_mask is not False:
__lowerCAmelCase = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
__lowerCAmelCase = attention_mask
return self.pad(__UpperCamelCase , padding=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors=__UpperCamelCase )
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))

            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
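

# Usage sketch (hedged):
#   tokenizer = DPRReaderTokenizerFast.from_pretrained("facebook/dpr-reader-single-nq-base")
#   encoding = tokenizer(
#       questions="What is love?",
#       titles="Haddaway",
#       texts="'What Is Love' is a song recorded by Haddaway.",
#       return_tensors="pt",
#   )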
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
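

# How the toy merge table applies (a hedged walk-through): "adapt" hits the merges
# "a d" -> "ad" and "ad apt</w>" -> "adapt", so it stays one token, while "react"
# only matches "r e" -> "re" and is left as "re@@ a@@ c@@ t".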
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]
        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)
        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]
        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])
        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]
        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ], axis=-1, )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
    expected_text = [
'''California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'''
''' reduce the risk of wildfires.''',
'''N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.''',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
    model_name = "google/pegasus-xsum"
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=True, )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
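# Illustrative standalone usage for the checkpoint exercised above (not part of
# the test suite): it downloads the full google/pegasus-xsum weights and needs
# TensorFlow installed, so it lives behind a __main__ guard instead of unittest.
if __name__ == "__main__":
    tok = AutoTokenizer.from_pretrained("google/pegasus-xsum")
    mdl = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")
    batch = tok(["PG&E scheduled the blackouts in response to forecasts for high winds."], return_tensors="tf")
    summary_ids = mdl.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
    print(tok.batch_decode(summary_ids.numpy(), skip_special_tokens=True))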
| 181 | 1 |
'''simple docstring'''
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
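    # Worked example (illustrative): at 60 degrees cos^2(theta) = 0.25, so an
    # initial intensity of 100.0 is attenuated to 25.0.
    assert round(malus_law(100.0, 60.0), 10) == 25.0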
| 343 |
'''simple docstring'''
def solution(length: int = 50) -> int:
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(f'''{solution() = }''')
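# Illustrative cross-check (not in the original file): for a fixed tile length t,
# the number of ways to fill a row of n unit squares with black squares and
# t-length tiles satisfies f(n) = f(n - 1) + f(n - t) with f(k) = 1 for k < t;
# using at least one tile then gives f(n) - 1 per colour, summed over t = 2, 3, 4.
def _solution_bruteforce(length: int = 50) -> int:
    def fill_count(n: int, tile_length: int) -> int:
        ways = [1] * min(tile_length, n + 1) + [0] * max(0, n - tile_length + 1)
        for i in range(tile_length, n + 1):
            ways[i] = ways[i - 1] + ways[i - tile_length]
        return ways[n]

    return sum(fill_count(length, tile_length) - 1 for tile_length in (2, 3, 4))


assert _solution_bruteforce(5) == 12  # worked example: 7 red + 3 green + 2 blue
assert _solution_bruteforce(50) == solution(50)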
| 343 | 1 |
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    def __init__(self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet2DConditionModel, scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], safety_checker: StableDiffusionSafetyChecker, feature_extractor: CLIPImageProcessor, requires_safety_checker: bool = True, ):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=requires_safety_checker, )
        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
    @property
    def layers(self):
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def text2img_sd1_1(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        return self.pipe1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

    @torch.no_grad()
    def text2img_sd1_2(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        return self.pipe2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

    @torch.no_grad()
    def text2img_sd1_3(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        return self.pipe3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )

    @torch.no_grad()
    def text2img_sd1_4(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        return self.pipe4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs, ):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs, )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
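# Illustrative usage sketch (not part of the original pipeline file): build the
# comparison pipeline from one base checkpoint's components, then render the same
# prompt with all four v1.x checkpoints. Running this downloads four checkpoints.
if __name__ == "__main__":
    base = StableDiffusionPipeline.from_pretrained(pipe4_model_id)
    comparison = StableDiffusionComparisonPipeline(
        vae=base.vae, text_encoder=base.text_encoder, tokenizer=base.tokenizer, unet=base.unet,
        scheduler=base.scheduler, safety_checker=base.safety_checker, feature_extractor=base.feature_extractor, )
    result = comparison("a photo of an astronaut riding a horse", num_inference_steps=25)
    for version_idx, images in enumerate(result.images, start=1):
        images[0].save(f"sd_v1_{version_idx}.png")  # one image list per checkpoint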
| 69 |
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
ALIGN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kakaobrain/align-base": "https://huggingface.co/kakaobrain/align-base/resolve/main/config.json",
}
class AlignTextConfig(PretrainedConfig):
    model_type = "align_text_model"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, **kwargs, ):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class AlignVisionConfig(PretrainedConfig):
    model_type = "align_vision_model"

    def __init__(self, num_channels: int = 3, image_size: int = 600, width_coefficient: float = 2.0, depth_coefficient: float = 3.1, depth_divisor: int = 8, kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3], in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192], out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320], depthwise_padding: List[int] = [], strides: List[int] = [1, 2, 2, 2, 1, 2, 1], num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1], expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6], squeeze_expansion_ratio: float = 0.25, hidden_act: str = "swish", hidden_dim: int = 2560, pooling_type: str = "mean", initializer_range: float = 0.02, batch_norm_eps: float = 0.001, batch_norm_momentum: float = 0.99, drop_connect_rate: float = 0.2, **kwargs, ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get("model_type") == "align":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors.")
        return cls.from_dict(config_dict, **kwargs)
class AlignConfig(PretrainedConfig):
    model_type = "align"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=640, temperature_init_value=1.0, initializer_range=0.02, **kwargs, ):
        super().__init__(**kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the AlignTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the AlignVisionConfig with default values.")
        self.text_config = AlignTextConfig(**text_config)
        self.vision_config = AlignVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range

    @classmethod
    def from_text_vision_configs(cls, text_config: AlignTextConfig, vision_config: AlignVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
| 149 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
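# Illustrative miniature of the _LazyModule pattern used above (standalone, not
# part of transformers): heavy submodules are imported only when one of their
# exported attributes is first accessed, then cached on the module object.
import importlib
import types


class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(self._attr_to_module[attr]), attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value


# e.g. _TinyLazyModule("demo", {"json": ["dumps"]}).dumps({"a": 1}) == '{"a": 1}'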
| 706 |
'''simple docstring'''
import logging
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEncoder,
BertModel,
BertPreTrainedModel,
)
logger = logging.getLogger(__name__)
class BertEncoderWithPabee(BertEncoder):
    def adaptive_forward(self, hidden_states, current_layer, attention_mask=None, head_mask=None):
        layer_outputs = self.layer[current_layer](hidden_states, attention_mask, head_mask[current_layer])
        hidden_states = layer_outputs[0]
        return hidden_states
@add_start_docstrings(
    "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.",
    BERT_START_DOCSTRING,
)
class BertModelWithPabee(BertModel):
    def __init__(self, config):
        super().__init__(config)
        self.encoder = BertEncoderWithPabee(config)
        self.init_weights()
        self.patience = 0
        self.inference_instances_num = 0
        self.inference_layers_num = 0
        self.regression_threshold = 0

    def set_regression_threshold(self, threshold):
        self.regression_threshold = threshold

    def set_patience(self, patience):
        self.patience = patience

    def reset_stats(self):
        self.inference_instances_num = 0
        self.inference_layers_num = 0

    def log_stats(self):
        avg_inf_layers = self.inference_layers_num / self.inference_instances_num
        message = (
            f"*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up ="
            f" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***"
        )
        print(message)
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_dropout=None, output_layers=None, regression=False, ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")
        device = input_ids.device if input_ids is not None else inputs_embeds.device
        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)
        # If a 2D ou 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None
        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds)
        encoder_outputs = embedding_output
        if self.training:
            res = []
            for i in range(self.config.num_hidden_layers):
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask)
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](output_dropout(pooled_output))
                res.append(logits)
        elif self.patience == 0:  # Use all layers for inference
            encoder_outputs = self.encoder(
                embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, )
            pooled_output = self.pooler(encoder_outputs[0])
            res = [output_layers[self.config.num_hidden_layers - 1](pooled_output)]
        else:
            patient_counter = 0
            patient_result = None
            calculated_layer_num = 0
            for i in range(self.config.num_hidden_layers):
                calculated_layer_num += 1
                encoder_outputs = self.encoder.adaptive_forward(
                    encoder_outputs, current_layer=i, attention_mask=extended_attention_mask, head_mask=head_mask)
                pooled_output = self.pooler(encoder_outputs)
                logits = output_layers[i](pooled_output)
                if regression:
                    labels = logits.detach()
                    if patient_result is not None:
                        patient_labels = patient_result.detach()
                    if (patient_result is not None) and torch.abs(patient_labels - labels) < self.regression_threshold:
                        patient_counter += 1
                    else:
                        patient_counter = 0
                else:
                    labels = logits.detach().argmax(dim=1)
                    if patient_result is not None:
                        patient_labels = patient_result.detach().argmax(dim=1)
                    if (patient_result is not None) and torch.all(labels.eq(patient_labels)):
                        patient_counter += 1
                    else:
                        patient_counter = 0
                patient_result = logits
                if patient_counter == self.patience:
                    break
            res = [patient_result]
            self.inference_layers_num += calculated_layer_num
            self.inference_instances_num += 1
        return res
@add_start_docstrings(
    """Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks. """,
    BERT_START_DOCSTRING,
)
class BertForSequenceClassificationWithPabee(BertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModelWithPabee(config)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifiers = nn.ModuleList(
            [nn.Linear(config.hidden_size, self.config.num_labels) for _ in range(config.num_hidden_layers)])
        self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING)
    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, ):
        logits = self.bert(
            input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_dropout=self.dropout, output_layers=self.classifiers, regression=self.num_labels == 1, )
        outputs = (logits[-1],)
        if labels is not None:
            total_loss = None
            total_weights = 0
            for ix, logits_item in enumerate(logits):
                if self.num_labels == 1:
                    # We are doing regression
                    loss_fct = MSELoss()
                    loss = loss_fct(logits_item.view(-1), labels.view(-1))
                else:
                    loss_fct = CrossEntropyLoss()
                    loss = loss_fct(logits_item.view(-1, self.num_labels), labels.view(-1))
                if total_loss is None:
                    total_loss = loss
                else:
                    total_loss += loss * (ix + 1)
                total_weights += ix + 1
            outputs = (total_loss / total_weights,) + outputs
        return outputs
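# Illustrative toy of the PABEE stopping rule above (not the model's real
# inference path): iterate per-layer classifier logits and exit once `patience`
# consecutive layers agree on the argmax prediction.
def _patience_early_exit(per_layer_logits, patience):
    patient_counter = 0
    patient_result = None
    layers_used = 0
    for layers_used, logits in enumerate(per_layer_logits, start=1):
        if patient_result is not None and torch.all(logits.argmax(dim=1).eq(patient_result.argmax(dim=1))):
            patient_counter += 1
        else:
            patient_counter = 0
        patient_result = logits
        if patient_counter == patience:
            break
    return patient_result.argmax(dim=1), layers_used


# With 12 identical heads and patience=3 the loop exits after 4 layers:
# _patience_early_exit([torch.tensor([[0.1, 0.9]])] * 12, 3) -> (tensor([1]), 4)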
| 528 | 0 |
from packaging import version
from .import_utils import is_accelerate_available
if is_accelerate_available():
import accelerate
def apply_forward_hook(method):
    if not is_accelerate_available():
        return method
    accelerate_version = version.parse(accelerate.__version__).base_version
    if version.parse(accelerate_version) < version.parse("0.17.0"):
        return method

    def wrapper(self, *args, **kwargs):
        if hasattr(self, "_hf_hook") and hasattr(self._hf_hook, "pre_forward"):
            self._hf_hook.pre_forward(self)
        return method(self, *args, **kwargs)

    return wrapper
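# Illustrative usage (not from the original file): decorate a module method that
# should trigger an attached accelerate offload hook before it runs. Without
# accelerate installed, the decorator returns the method unchanged.
if __name__ == "__main__":
    import torch

    class TinyAutoencoder(torch.nn.Module):
        @apply_forward_hook
        def encode(self, x):
            return x * 0.5

    print(TinyAutoencoder().encode(torch.ones(2)))  # tensor([0.5000, 0.5000])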
| 89 |
"""simple docstring"""
from __future__ import annotations
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def next_greatest_element_slow(arr: list[float]) -> list[float]:
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_element: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_element = arr[j]
                break
        result.append(next_element)
    return result


def next_greatest_element_fast(arr: list[float]) -> list[float]:
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result


def next_greatest_element(arr: list[float]) -> list[float]:
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
from doctest import testmod
from timeit import timeit
testmod()
print(next_greatest_element_slow(arr))
print(next_greatest_element_fast(arr))
print(next_greatest_element(arr))
    setup = (
'''from __main__ import arr, next_greatest_element_slow, '''
'''next_greatest_element_fast, next_greatest_element'''
)
print(
'''next_greatest_element_slow():''',
timeit('''next_greatest_element_slow(arr)''', setup=setup),
)
print(
'''next_greatest_element_fast():''',
timeit('''next_greatest_element_fast(arr)''', setup=setup),
)
print(
''' next_greatest_element():''',
timeit('''next_greatest_element(arr)''', setup=setup),
)
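    # Illustrative extra checks (not in the original benchmark): all three
    # variants agree, e.g. [2, 1, 5, 3] -> [5, 5, -1, -1] via the monotonic stack.
    assert next_greatest_element([2, 1, 5, 3]) == [5, 5, -1, -1]
    assert next_greatest_element_slow(arr) == next_greatest_element_fast(arr) == next_greatest_element(arr) == expect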
| 373 | 0 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True.")
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images
        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")
        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
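# Illustrative usage sketch (not part of the original module): pair the
# OCR-enabled image processor with a LayoutXLM tokenizer. Requires Tesseract for
# apply_ocr=True; "document.png" is a placeholder path.
if __name__ == "__main__":
    from PIL import Image
    from transformers import LayoutLMv2ImageProcessor, LayoutXLMTokenizerFast

    processor = LayoutXLMProcessor(
        image_processor=LayoutLMv2ImageProcessor(apply_ocr=True),
        tokenizer=LayoutXLMTokenizerFast.from_pretrained("microsoft/layoutxlm-base"), )
    encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
    print(sorted(encoding.keys()))  # ['attention_mask', 'bbox', 'image', 'input_ids']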
| 592 |
from __future__ import annotations
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
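    # Worked examples (illustrative): exactly one of the three arguments is 0,
    # and the function solves for that quantity.
    print(ohms_law(voltage=10, current=5, resistance=0))  # {'resistance': 2.0}
    print(ohms_law(voltage=0, current=1.5, resistance=2))  # {'voltage': 3.0}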
| 592 | 1 |
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = False, False, False
@dataclass
class Audio:
    sampling_rate: Optional[int] = None
    mono: bool = True
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Audio", init=False, repr=False)
def __call__( self : Tuple ):
"""simple docstring"""
return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict]) -> dict:
        try:
            import soundfile as sf  # soundfile is a dependency of librosa, needed to decode audio files.
        except ImportError as err:
            raise ImportError("To support encoding audio data, please install 'soundfile'.") from err
        if isinstance(value, str):
            return {"bytes": None, "path": value}
        elif isinstance(value, bytes):
            return {"bytes": value, "path": None}
        elif "array" in value:
            # convert the audio array to wav bytes
            buffer = BytesIO()
            sf.write(buffer, value["array"], value["sampling_rate"], format="wav")
            return {"bytes": buffer.getvalue(), "path": None}
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            if value["path"].endswith("pcm"):
                # "PCM" only has raw audio bytes
                if value.get("sampling_rate") is None:
                    # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
                    raise KeyError("To use PCM files, please specify a 'sampling_rate' in Audio object")
                if value.get("bytes"):
                    # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
                    bytes_value = np.frombuffer(value["bytes"], dtype=np.int16).astype(np.float32) / 32767
                else:
                    bytes_value = np.memmap(value["path"], dtype="h", mode="r").astype(np.float32) / 32767
                buffer = BytesIO(bytes())
                sf.write(buffer, bytes_value, value["sampling_rate"], format="wav")
                return {"bytes": buffer.getvalue(), "path": None}
            else:
                return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the audio bytes, and path is used to infer the audio format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")
    def decode_example(self, value: dict, token_per_repo_id: Optional[Dict[str, Union[str, bool, None]]] = None) -> dict:
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Audio(decode=True) instead.")
        path, file = (value["path"], BytesIO(value["bytes"])) if value["bytes"] is not None else (value["path"], None)
        if path is None and file is None:
            raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}.")
        try:
            import librosa
            import soundfile as sf
        except ImportError as err:
            raise ImportError("To support decoding audio files, please install 'librosa' and 'soundfile'.") from err
        audio_format = xsplitext(path)[1][1:].lower() if path is not None else None
        if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
            raise RuntimeError(
                "Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ')
        elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
            raise RuntimeError(
                "Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, "
                'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ')
        if file is None:
            token_per_repo_id = token_per_repo_id or {}
            source_url = path.split("::")[-1]
            try:
                repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                token = token_per_repo_id[repo_id]
            except (ValueError, KeyError):
                token = None
            with xopen(path, "rb", use_auth_token=token) as f:
                array, sampling_rate = sf.read(f)
        else:
            array, sampling_rate = sf.read(file)
        array = array.T
        if self.mono:
            array = librosa.to_mono(array)
        if self.sampling_rate and self.sampling_rate != sampling_rate:
            array = librosa.resample(array, orig_sr=sampling_rate, target_sr=self.sampling_rate)
            sampling_rate = self.sampling_rate
        return {"path": path, "array": array, "sampling_rate": sampling_rate}
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        if self.decode:
            raise ValueError("Cannot flatten a decoded Audio feature.")
        return {
            "bytes": Value("binary"),
            "path": Value("string"),
        }
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("array"):
            storage = pa.array([Audio().encode_example(x) if x is not None else None for x in storage.to_pylist()])
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary(), )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()], type=pa.string(), )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
| 256 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
_import_structure = {
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_ernie"] = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 256 | 1 |
from __future__ import annotations
import math
class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
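    # Illustrative cross-check (not in the original demo): mirror both range
    # assignments on a plain list; the lazy tree's range-max must agree.
    naive = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    naive[0:3] = [111] * 3  # mirrors update(1, 1, size, 1, 3, 111)
    naive[6:8] = [235] * 2  # mirrors update(1, 1, size, 7, 8, 235)
    assert segt.query(1, 1, size, 1, size) == max(naive)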
| 585 |
from __future__ import annotations

import math


class SegmentTree:
    """Segment tree with lazy propagation supporting range-assignment
    updates and range-maximum queries (1-indexed positions)."""

    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(
        self, idx: int, left_element: int, right_element: int, a: list[int]
    ) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(
        self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int
    ) -> bool:
        """Assign val to every position in [a, b], pushing pending updates down lazily."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(
        self, idx: int, left_element: int, right_element: int, a: int, b: int
    ) -> int | float:
        """Return the maximum over positions [a, b]."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
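# Reference output (hand-checked against the array above; added for clarity,
# not part of the original source): the demo prints 7, 14, 15, then 111 after
# the first range assignment, and finally
# [111, 111, 111, 7, 3, -5, 235, 235, -20, 9, 14, 15, 5, 2, -8]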
| 585 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
a = logging.get_logger(__name__)
a = {
"bigscience/bloom": "https://huggingface.co/bigscience/bloom/resolve/main/config.json",
"bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/config.json",
"bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json",
"bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json",
"bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/config.json",
"bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json",
}
class BloomConfig(PretrainedConfig):
    model_type = "bloom"

    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_hidden_layers": "n_layer",
        "num_attention_heads": "n_head",
    }
    def __init__(
        self,
        vocab_size=250880,
        hidden_size=64,
        n_layer=2,
        n_head=8,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=1,
        eos_token_id=2,
        apply_residual_connection_post_layernorm=False,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        pretraining_tp=1,
        slow_but_exact=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.n_layer = n_layer
        self.n_head = n_head
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.pretraining_tp = pretraining_tp
        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.slow_but_exact = slow_but_exact

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
class BloomOnnxConfig(OnnxConfigWithPast):
    torch_onnx_minimum_version = version.parse("1.12")

    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            # BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
            self.fill_with_past_key_values_(common_inputs, direction="inputs", inverted_values_shape=True)
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs
    @property
    def num_layers(self) -> int:
        return self._config.n_layer

    @property
    def num_attention_heads(self) -> int:
        return self._config.n_head

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
    def generate_dummy_inputs(
        self,
        tokenizer: "PreTrainedTokenizer",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                head_dim = self._config.hidden_size // self.num_attention_heads
                past_key_shape = (
                    batch * self.num_attention_heads,
                    head_dim,
                    past_key_values_length,
                )
                past_value_shape = (
                    batch * self.num_attention_heads,
                    past_key_values_length,
                    head_dim,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_key_shape), torch.zeros(past_value_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs
    @property
    def default_onnx_opset(self) -> int:
        return 13
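# Minimal usage sketch (added for illustration; values are arbitrary, and the
# class names assume the restored identifiers above):
#
#   config = BloomConfig(n_layer=2, n_head=8, hidden_size=64)
#   onnx_config = BloomOnnxConfig(config, task="default", use_past=True)
#   print(onnx_config.inputs)  # axes for input_ids / past_key_values / attention_mask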
| 109 |
def solution(n: int = 600_851_475_143) -> int:
    """Return the largest prime factor of n (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    ans = 1
    i = 2
    while i * i <= n:
        while n % i == 0:
            ans = i
            n //= i
        i += 1
    if n > 1:
        ans = n
    return int(ans)
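# Quick sanity checks (values from the Project Euler problem statement; added
# for illustration):
#   solution(13195)  ->  29
#   solution()       ->  6857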
if __name__ == "__main__":
    print(F'''{solution() = }''')
| 6 | 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }

        config.update(**kwargs)
        return config
def __UpperCAmelCase ( self : Optional[Any] , lowerCamelCase_ : Optional[int]=0 , **lowerCamelCase_ : Tuple ):
'''simple docstring'''
_snake_case : str = dict(self.forward_default_kwargs )
_snake_case : Any = kwargs.pop('num_inference_steps' , lowerCamelCase_ )
_snake_case : int = self.dummy_sample
_snake_case : Optional[Any] = 0.1 * sample
_snake_case : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_snake_case : Optional[Any] = self.get_scheduler_config(**lowerCamelCase_ )
_snake_case : List[Any] = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residuals
_snake_case : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase_ )
_snake_case : Optional[Any] = scheduler_class.from_pretrained(lowerCamelCase_ )
new_scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residuals
_snake_case : Any = dummy_past_residuals[: new_scheduler.config.solver_order]
_snake_case , _snake_case : List[Any] = sample, sample
for t in range(lowerCamelCase_ , time_step + scheduler.config.solver_order + 1 ):
_snake_case : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
_snake_case : int = new_scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __UpperCAmelCase ( self : Tuple ):
'''simple docstring'''
pass
def __UpperCAmelCase ( self : Tuple , lowerCamelCase_ : Optional[int]=0 , **lowerCamelCase_ : str ):
'''simple docstring'''
_snake_case : str = dict(self.forward_default_kwargs )
_snake_case : Any = kwargs.pop('num_inference_steps' , lowerCamelCase_ )
_snake_case : str = self.dummy_sample
_snake_case : str = 0.1 * sample
_snake_case : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_snake_case : Optional[Any] = self.get_scheduler_config()
_snake_case : Tuple = scheduler_class(**lowerCamelCase_ )
scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residuals (must be after setting timesteps)
_snake_case : Optional[int] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(lowerCamelCase_ )
_snake_case : Any = scheduler_class.from_pretrained(lowerCamelCase_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(lowerCamelCase_ )
# copy over dummy past residual (must be after setting timesteps)
_snake_case : Union[str, Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
_snake_case : Tuple = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
_snake_case : Tuple = new_scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def __UpperCAmelCase ( self : str , lowerCamelCase_ : List[str]=None , **lowerCamelCase_ : Optional[int] ):
'''simple docstring'''
if scheduler is None:
_snake_case : str = self.scheduler_classes[0]
_snake_case : Any = self.get_scheduler_config(**lowerCamelCase_ )
_snake_case : Optional[int] = scheduler_class(**lowerCamelCase_ )
_snake_case : Optional[Any] = self.scheduler_classes[0]
_snake_case : str = self.get_scheduler_config(**lowerCamelCase_ )
_snake_case : int = scheduler_class(**lowerCamelCase_ )
_snake_case : str = 10
_snake_case : Tuple = self.dummy_model()
_snake_case : int = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase_ )
for i, t in enumerate(scheduler.timesteps ):
_snake_case : Any = model(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : str = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
return sample
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
_snake_case : Any = dict(self.forward_default_kwargs )
_snake_case : List[str] = kwargs.pop('num_inference_steps' , lowerCamelCase_ )
for scheduler_class in self.scheduler_classes:
_snake_case : Optional[int] = self.get_scheduler_config()
_snake_case : Optional[int] = scheduler_class(**lowerCamelCase_ )
_snake_case : str = self.dummy_sample
_snake_case : Optional[Any] = 0.1 * sample
if num_inference_steps is not None and hasattr(lowerCamelCase_ , 'set_timesteps' ):
scheduler.set_timesteps(lowerCamelCase_ )
elif num_inference_steps is not None and not hasattr(lowerCamelCase_ , 'set_timesteps' ):
_snake_case : Optional[int] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_snake_case : List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
_snake_case : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
_snake_case : Dict = scheduler.timesteps[5]
_snake_case : Tuple = scheduler.timesteps[6]
_snake_case : Union[str, Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
_snake_case : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , **lowerCamelCase_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
_snake_case : Union[str, Any] = DEISMultistepScheduler(**self.get_scheduler_config() )
_snake_case : Union[str, Any] = self.full_loop(scheduler=lowerCamelCase_ )
_snake_case : Tuple = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_mean.item() - 0.2_3916 ) < 1e-3
_snake_case : Optional[Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_snake_case : str = DPMSolverMultistepScheduler.from_config(scheduler.config )
_snake_case : Optional[Any] = UniPCMultistepScheduler.from_config(scheduler.config )
_snake_case : Optional[Any] = DEISMultistepScheduler.from_config(scheduler.config )
_snake_case : Tuple = self.full_loop(scheduler=lowerCamelCase_ )
_snake_case : str = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_mean.item() - 0.2_3916 ) < 1e-3
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
for timesteps in [25, 50, 1_00, 9_99, 10_00]:
self.check_over_configs(num_train_timesteps=lowerCamelCase_ )
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
self.check_over_configs(thresholding=lowerCamelCase_ )
for order in [1, 2, 3]:
for solver_type in ["logrho"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowerCamelCase_ , prediction_type=lowerCamelCase_ , sample_max_value=lowerCamelCase_ , algorithm_type='deis' , solver_order=lowerCamelCase_ , solver_type=lowerCamelCase_ , )
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase_ )
def __UpperCAmelCase ( self : Dict ):
'''simple docstring'''
for algorithm_type in ["deis"]:
for solver_type in ["logrho"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCamelCase_ , solver_type=lowerCamelCase_ , prediction_type=lowerCamelCase_ , algorithm_type=lowerCamelCase_ , )
_snake_case : List[str] = self.full_loop(
solver_order=lowerCamelCase_ , solver_type=lowerCamelCase_ , prediction_type=lowerCamelCase_ , algorithm_type=lowerCamelCase_ , )
assert not torch.isnan(lowerCamelCase_ ).any(), "Samples have nan numbers"
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
self.check_over_configs(lower_order_final=lowerCamelCase_ )
self.check_over_configs(lower_order_final=lowerCamelCase_ )
def __UpperCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
for num_inference_steps in [1, 2, 3, 5, 10, 50, 1_00, 9_99, 10_00]:
self.check_over_forward(num_inference_steps=lowerCamelCase_ , time_step=0 )
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : str = self.full_loop()
_snake_case : Optional[int] = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_mean.item() - 0.2_3916 ) < 1e-3
def __UpperCAmelCase ( self : int ):
'''simple docstring'''
_snake_case : int = self.full_loop(prediction_type='v_prediction' )
_snake_case : int = torch.mean(torch.abs(lowerCamelCase_ ) )
assert abs(result_mean.item() - 0.091 ) < 1e-3
def __UpperCAmelCase ( self : Any ):
'''simple docstring'''
_snake_case : Optional[int] = self.scheduler_classes[0]
_snake_case : Optional[Any] = self.get_scheduler_config(thresholding=lowerCamelCase_ , dynamic_thresholding_ratio=0 )
_snake_case : Tuple = scheduler_class(**lowerCamelCase_ )
_snake_case : List[Any] = 10
_snake_case : Optional[int] = self.dummy_model()
_snake_case : Any = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowerCamelCase_ )
for i, t in enumerate(scheduler.timesteps ):
_snake_case : List[Any] = model(lowerCamelCase_ , lowerCamelCase_ )
_snake_case : Optional[Any] = scheduler.step(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ).prev_sample
        assert sample.dtype == torch.float16
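# Illustrative note (not part of the original test): the config round-trip
# exercised above is the public pattern for swapping multistep solvers, e.g.
#
#   deis = DEISMultistepScheduler(num_train_timesteps=1000)
#   dpm = DPMSolverMultistepScheduler.from_config(deis.config)  # same schedule, different solver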
| 652 |
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--bert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
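# Hypothetical invocation (file paths are placeholders, not from the source):
#
#   python convert_bert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bert_model.ckpt \
#       --bert_config_file ./bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin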
| 652 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_m2m_100': ['M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP', 'M2M100Config', 'M2M100OnnxConfig'],
'tokenization_m2m_100': ['M2M100Tokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_m2m_100"] = [
'M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST',
'M2M100ForConditionalGeneration',
'M2M100Model',
'M2M100PreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config, M2M100OnnxConfig
    from .tokenization_m2m_100 import M2M100Tokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_m2m_100 import (
            M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST,
            M2M100ForConditionalGeneration,
            M2M100Model,
            M2M100PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
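# Note (added): installing the _LazyModule in sys.modules defers the heavy
# torch imports until first attribute access, e.g.
#
#   from transformers.models.m2m_100 import M2M100Tokenizer  # resolved lazily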
| 15 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class ScoreSdeVePipeline(DiffusionPipeline):
    r"""Pipeline for unconditional image generation with the score SDE (VE) sampler."""

    unet: UNet2DModel
    scheduler: ScoreSdeVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: ScoreSdeVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 2000,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        sample = randn_tensor(shape, generator=generator) * self.scheduler.init_noise_sigma
        sample = sample.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)
        self.scheduler.set_sigmas(num_inference_steps)

        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            sigma_t = self.scheduler.sigmas[i] * torch.ones(shape[0], device=self.device)

            # correction step
            for _ in range(self.scheduler.config.correct_steps):
                model_output = self.unet(sample, sigma_t).sample
                sample = self.scheduler.step_correct(model_output, sample, generator=generator).prev_sample

            # prediction step
            model_output = model(sample, sigma_t).sample
            output = self.scheduler.step_pred(model_output, t, sample, generator=generator)

            sample, sample_mean = output.prev_sample, output.prev_sample_mean

        sample = sample_mean.clamp(0, 1)
        sample = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            sample = self.numpy_to_pil(sample)

        if not return_dict:
            return (sample,)

        return ImagePipelineOutput(images=sample)
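# Minimal usage sketch (added; the checkpoint id is illustrative -- any
# UNet2DModel + ScoreSdeVeScheduler pair works):
#
#   pipe = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")
#   image = pipe(num_inference_steps=2000).images[0]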
| 693 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_trajectory_transformer''': [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TrajectoryTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
'''TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrajectoryTransformerModel''',
'''TrajectoryTransformerPreTrainedModel''',
'''load_tf_weights_in_trajectory_transformer''',
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 717 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
def __init__( self :List[Any] ,__lowercase :Tuple ,__lowercase :List[Any]=1_3 ,__lowercase :List[Any]=7 ,__lowercase :int=True ,__lowercase :int=True ,__lowercase :Tuple=True ,__lowercase :int=True ,__lowercase :Dict=9_9 ,__lowercase :Any=3_2 ,__lowercase :Tuple=2 ,__lowercase :Union[str, Any]=4 ,__lowercase :Tuple=3_7 ,__lowercase :int="gelu" ,__lowercase :int=0.1 ,__lowercase :Dict=0.1 ,__lowercase :Optional[Any]=5_1_2 ,__lowercase :Optional[Any]=1_6 ,__lowercase :Optional[int]=2 ,__lowercase :Optional[int]=0.02 ,__lowercase :str=3 ,__lowercase :int=4 ,__lowercase :List[str]=None ,__lowercase :Union[str, Any]=0 ,):
snake_case__ : List[str] = parent
snake_case__ : int = batch_size
snake_case__ : Any = seq_length
snake_case__ : List[Any] = is_training
snake_case__ : str = use_input_mask
snake_case__ : str = use_token_type_ids
snake_case__ : Dict = use_labels
snake_case__ : Tuple = vocab_size
snake_case__ : Any = hidden_size
snake_case__ : str = num_hidden_layers
snake_case__ : int = num_attention_heads
snake_case__ : Tuple = intermediate_size
snake_case__ : int = hidden_act
snake_case__ : Tuple = hidden_dropout_prob
snake_case__ : Optional[Any] = attention_probs_dropout_prob
snake_case__ : List[Any] = max_position_embeddings
snake_case__ : str = type_vocab_size
snake_case__ : Tuple = type_sequence_label_size
snake_case__ : Any = initializer_range
snake_case__ : List[str] = num_labels
snake_case__ : str = num_choices
snake_case__ : Optional[Any] = scope
snake_case__ : str = projection_dim
def __lowerCamelCase ( self :List[Any] ):
snake_case__ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
snake_case__ : List[Any] = None
if self.use_input_mask:
# follow test_modeling_tf_ctrl.py
snake_case__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : Any = None
if self.use_token_type_ids:
snake_case__ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
snake_case__ : List[Any] = None
snake_case__ : Optional[Any] = None
snake_case__ : Union[str, Any] = None
if self.use_labels:
snake_case__ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
snake_case__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
snake_case__ : Optional[Any] = ids_tensor([self.batch_size] ,self.num_choices )
snake_case__ : Optional[int] = BertConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__lowercase ,initializer_range=self.initializer_range ,)
snake_case__ : Optional[int] = DPRConfig(projection_dim=self.projection_dim ,**config.to_dict() )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self :Tuple ,__lowercase :List[Any] ,__lowercase :Union[str, Any] ,__lowercase :Optional[int] ,__lowercase :Any ,__lowercase :Optional[int] ,__lowercase :Any ,__lowercase :Tuple ):
snake_case__ : List[str] = TFDPRContextEncoder(config=__lowercase )
snake_case__ : Optional[int] = model(__lowercase ,attention_mask=__lowercase ,token_type_ids=__lowercase )
snake_case__ : Optional[int] = model(__lowercase ,token_type_ids=__lowercase )
snake_case__ : Dict = model(__lowercase )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.projection_dim or self.hidden_size) )
def __lowerCamelCase ( self :Any ,__lowercase :List[str] ,__lowercase :List[str] ,__lowercase :Optional[Any] ,__lowercase :int ,__lowercase :List[Any] ,__lowercase :List[Any] ,__lowercase :Union[str, Any] ):
snake_case__ : Dict = TFDPRQuestionEncoder(config=__lowercase )
snake_case__ : Any = model(__lowercase ,attention_mask=__lowercase ,token_type_ids=__lowercase )
snake_case__ : int = model(__lowercase ,token_type_ids=__lowercase )
snake_case__ : Optional[int] = model(__lowercase )
self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.projection_dim or self.hidden_size) )
def __lowerCamelCase ( self :Optional[int] ,__lowercase :List[Any] ,__lowercase :str ,__lowercase :Tuple ,__lowercase :Any ,__lowercase :Optional[Any] ,__lowercase :List[Any] ,__lowercase :Tuple ):
snake_case__ : int = TFDPRReader(config=__lowercase )
snake_case__ : Any = model(__lowercase ,attention_mask=__lowercase )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.relevance_logits.shape ,(self.batch_size,) )
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : str = self.prepare_config_and_inputs()
(
(
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) , (
snake_case__
) ,
) : Optional[int] = config_and_inputs
snake_case__ : Optional[Any] = {'''input_ids''': input_ids}
return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)
def __lowerCamelCase ( self :List[str] ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_context_encoder(*__lowercase )
def __lowerCamelCase ( self :List[str] ):
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_question_encoder(*__lowercase )
def __lowerCamelCase ( self :str ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_dpr_reader(*__lowercase )
@slow
def __lowerCamelCase ( self :Union[str, Any] ):
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Union[str, Any] = TFDPRContextEncoder.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Tuple = TFDPRContextEncoder.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Tuple = TFDPRQuestionEncoder.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Optional[int] = TFDPRReader.from_pretrained(__lowercase )
self.assertIsNotNone(__lowercase )
@require_tf
class a ( unittest.TestCase ):
@slow
def __lowerCamelCase ( self :List[str] ):
snake_case__ : str = TFDPRQuestionEncoder.from_pretrained('''facebook/dpr-question_encoder-single-nq-base''' )
snake_case__ : Optional[int] = tf.constant(
[[1_0_1, 7_5_9_2, 1_0_1_0, 2_0_0_3, 2_0_2_6, 3_8_9_9, 1_0_1_4_0, 1_0_2_9, 1_0_2]] ) # [CLS] hello, is my dog cute? [SEP]
snake_case__ : Union[str, Any] = model(__lowercase )[0] # embedding shape = (1, 768)
# compare the actual values for a slice.
snake_case__ : Optional[Any] = tf.constant(
[
[
0.0323_6253,
0.1275_3335,
0.1681_8509,
0.0027_9786,
0.389_6933,
0.2426_4945,
0.217_8971,
-0.0233_5227,
-0.0848_1959,
-0.1432_4117,
]
] )
self.assertTrue(numpy.allclose(output[:, :1_0].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
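# Illustrative inference sketch mirroring the integration test above (the
# tokenizer class is an assumption, not used by the test itself):
#
#   from transformers import DPRQuestionEncoderTokenizer
#
#   tok = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   model = TFDPRQuestionEncoder.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
#   embedding = model(**tok("hello, is my dog cute?", return_tensors="tf"))[0]  # shape (1, 768)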
| 219 | 0 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples,
                    tokenizer=tokenizer,
                    max_seq_length=args.max_seq_length,
                    doc_stride=args.doc_stride,
                    max_query_length=args.max_query_length,
                    is_training=mode == Split.train,
                    threads=args.threads,
                    return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)

    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})

        return inputs
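# Hypothetical usage (added; the data directory is a placeholder):
#
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#   train_dataset = SquadDataset(args, tokenizer=tokenizer, mode=Split.train)
#   batch = train_dataset[0]  # dict of input_ids / attention_mask / ... tensors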
| 38 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_jukebox': [
'JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP',
'JukeboxConfig',
'JukeboxPriorConfig',
'JukeboxVQVAEConfig',
],
'tokenization_jukebox': ['JukeboxTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_jukebox"] = [
'JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'JukeboxModel',
'JukeboxPreTrainedModel',
'JukeboxVQVAE',
'JukeboxPrior',
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 503 | 0 |
def solution(length: int = 50) -> int:
    """Count the ways a row of the given length can be tiled with coloured
    tiles of length two (red), three (green) or four (blue)."""
    different_colour_ways_number = [[0] * 3 for _ in range(length + 1)]

    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                different_colour_ways_number[row_length][tile_length - 2] += (
                    different_colour_ways_number[row_length - tile_start - tile_length][
                        tile_length - 2
                    ]
                    + 1
                )

    return sum(different_colour_ways_number[length])
return sum(different_colour_ways_number[length] )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 313 |
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig
    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config

        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")

        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")

        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")

        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")

        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)

        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )

        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)
@classmethod
def SCREAMING_SNAKE_CASE_( cls , lowercase , *lowercase , **lowercase ) -> Tuple:
requires_backends(cls , ["vision", "timm"] )
from ...models.timm_backbone import TimmBackboneConfig
lowerCamelCase_ = kwargs.pop("config" , TimmBackboneConfig() )
lowerCamelCase_ = kwargs.pop("use_timm_backbone" , lowercase )
if not use_timm:
raise ValueError("use_timm_backbone must be True for timm backbones" )
lowerCamelCase_ = kwargs.pop("num_channels" , config.num_channels )
lowerCamelCase_ = kwargs.pop("features_only" , config.features_only )
lowerCamelCase_ = kwargs.pop("use_pretrained_backbone" , config.use_pretrained_backbone )
lowerCamelCase_ = kwargs.pop("out_indices" , config.out_indices )
lowerCamelCase_ = TimmBackboneConfig(
backbone=lowercase , num_channels=lowercase , features_only=lowercase , use_pretrained_backbone=lowercase , out_indices=lowercase , )
return super()._from_config(lowercase , **lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase ) -> List[Any]:
pass
    def forward(
        self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs
    ) -> Union[BackboneOutput, Tuple[Tensor, ...]]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
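# Minimal usage sketch (added; the backbone name is illustrative):
#
#   config = TimmBackboneConfig(backbone="resnet50", out_indices=(1, 2, 3, 4))
#   backbone = TimmBackbone(config)
#   feature_maps = backbone(pixel_values).feature_maps  # one tensor per requested stage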
| 313 | 1 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=5_1_2,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
    def parse_bool(string: str) -> bool:
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
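# Hypothetical invocation (paths are placeholders, not from the source):
#
#   python convert_original_controlnet_to_diffusers.py \
#       --checkpoint_path ./control_sd15_canny.pth \
#       --original_config_file ./cldm_v15.yaml \
#       --dump_path ./controlnet-canny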
| 97 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    texts = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR's WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
"""
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    base, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 629 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class AltDiffusionImgaImgPipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = RobertaSeriesConfig(
            hidden_size=32,
            project_dim=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=5006,
        )
        return RobertaSeriesModelWithTransformation(config)

    @property
    def dummy_extractor(self):
        # a stand-in feature extractor: returns an object whose `pixel_values` is empty
        def extract(*args, **kwargs):
            class Out:
                def __init__(self):
                    self.pixel_values = torch.ones([0])

                def to(self, device):
                    self.pixel_values.to(device)
                    return self

            return Out()

        return extract

    def test_stable_diffusion_img2img_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(device)
        init_image = init_image / 2 + 0.5

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImgaImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = alt_pipe(
            [prompt],
            generator=generator,
            guidance_scale=6.0,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 5e-3
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 5e-3

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_fp16(self):
        unet = self.dummy_cond_unet
        scheduler = PNDMScheduler(skip_prk_steps=True)
        vae = self.dummy_vae
        bert = self.dummy_text_encoder
        tokenizer = XLMRobertaTokenizer.from_pretrained("hf-internal-testing/tiny-xlm-roberta")
        tokenizer.model_max_length = 77

        init_image = self.dummy_image.to(torch_device)

        # put models in fp16
        unet = unet.half()
        vae = vae.half()
        bert = bert.half()

        # make sure here that pndm scheduler skips prk
        alt_pipe = AltDiffusionImgaImgPipeline(
            unet=unet,
            scheduler=scheduler,
            vae=vae,
            text_encoder=bert,
            tokenizer=tokenizer,
            safety_checker=None,
            feature_extractor=self.dummy_extractor,
        )
        alt_pipe.image_processor = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor, do_normalize=False)
        alt_pipe = alt_pipe.to(torch_device)
        alt_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = alt_pipe(
            [prompt],
            generator=generator,
            num_inference_steps=2,
            output_type="np",
            image=init_image,
        ).images

        assert image.shape == (1, 32, 32, 3)

    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_img2img_pipeline_multiple_of_8(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        # resize to resolution that is divisible by 8 but not 16 or 32
        init_image = init_image.resize((760, 504))

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        image_slice = image[255:258, 383:386, -1]

        assert image.shape == (504, 760, 3)
        expected_slice = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch_gpu
class AltDiffusionImgaImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_img2img_pipeline_default(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy"
        )

        model_id = "BAAI/AltDiffusion"
        pipe = AltDiffusionImgaImgPipeline.from_pretrained(
            model_id,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "A fantasy landscape, trending on artstation"

        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (512, 768, 3)
        # img2img is flaky across GPUs even in fp32, so using MAE here
        assert np.abs(expected_image - image).max() < 1e-2
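# A minimal usage sketch for the pipeline exercised above (illustrative only: the
# checkpoint id and the strength/guidance values mirror the integration test, while
# `num_inference_steps=50` and the output filename are assumptions):
#
#   pipe = AltDiffusionImgaImgPipeline.from_pretrained("BAAI/AltDiffusion").to("cuda")
#   out = pipe(prompt="A fantasy landscape, trending on artstation", image=init_image,
#              strength=0.75, guidance_scale=7.5, num_inference_steps=50)
#   out.images[0].save("fantasy_landscape.png")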
"""UnCLIP scheduler: a DDPM-style scheduler with the modifications used by the unCLIP (Karlo) pipelines."""

import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    """
    Output class for the scheduler's `step` function.
    """

    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """
    Create a beta schedule that discretizes the given alpha_bar function, which defines the cumulative product of
    (1 - beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    """
    A DDPM-style scheduler kept for the unCLIP pipelines; it differs from the plain DDPM scheduler in its variance
    handling (`fixed_small_log` / `learned_range`) and in accepting an explicit `prev_timestep` in `step`.
    """

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """
        Ensures interchangeability with schedulers that need to scale the denoising model input; a no-op here.
        """
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """
        Sets the discrete timesteps used for the diffusion chain, spaced evenly from the end of the training
        schedule back to 0.
        """
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ) -> Union[UnCLIPSchedulerOutput, Tuple]:
        """
        Predict the sample at the previous timestep by reversing the SDE.
        """
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )

            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)

        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        """
        Forward diffusion: x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * noise.
        """
        # Make sure alphas_cumprod and timesteps have the same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
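# A minimal denoising-loop sketch for this scheduler (illustrative only; `unet` is a
# hypothetical noise-prediction model, not part of this module):
#
#   scheduler = UnCLIPScheduler(variance_type="fixed_small_log")
#   scheduler.set_timesteps(num_inference_steps=25, device="cpu")
#   sample = torch.randn(1, 3, 64, 64)
#   for t in scheduler.timesteps:
#       model_output = unet(sample, t)
#       sample = scheduler.step(model_output, t, sample).prev_sample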
"""
Count the number of ways a target integer can be written as the sum of the n-th
powers of unique natural numbers, using backtracking.

For example, 13 can be expressed as a sum of unique squares in exactly one way:
2**2 + 3**2.
"""
from math import pow


def backtrack(
    needed_sum: int,
    power: int,
    current_number: int,
    current_sum: int,
    solutions_count: int,
) -> tuple[int, int]:
    if current_sum == needed_sum:
        # If the sum of the powers is equal to needed_sum, then we have a solution.
        solutions_count += 1
        return current_sum, solutions_count

    i_to_n = int(pow(current_number, power))
    if current_sum + i_to_n <= needed_sum:
        # If the sum of the powers is less than needed_sum, then continue adding powers.
        current_sum += i_to_n
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
        current_sum -= i_to_n
    if i_to_n < needed_sum:
        # If the power of i is less than needed_sum, then try with the next power.
        current_sum, solutions_count = backtrack(
            needed_sum, power, current_number + 1, current_sum, solutions_count
        )
    return current_sum, solutions_count


def solve(needed_sum: int, power: int) -> int:
    """
    >>> solve(13, 2)
    1
    >>> solve(100, 2)
    3
    >>> solve(100, 3)
    1
    """
    if not (1 <= needed_sum <= 1000 and 2 <= power <= 10):
        raise ValueError(
            "Invalid input\n"
            "needed_sum must be between 1 and 1000, power between 2 and 10."
        )

    return backtrack(needed_sum, power, 1, 0, 0)[1]  # Return the solutions_count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
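# Worked example: solve(100, 2) == 3 because 100 can be written as a sum of
# distinct squares in exactly three ways:
#   100 = 10**2
#   100 = 6**2 + 8**2
#   100 = 1**2 + 3**2 + 4**2 + 5**2 + 7**2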
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileViTImageProcessor,
    MobileViTV2Config,
    MobileViTV2ForImageClassification,
    MobileViTV2ForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_orig_config_file(orig_cfg_file):
    print("Loading config file...")

    def flatten_yaml_as_dict(d, parent_key="", sep="."):
        items = []
        for k, v in d.items():
            new_key = parent_key + sep + k if parent_key else k
            if isinstance(v, collections.abc.MutableMapping):
                items.extend(flatten_yaml_as_dict(v, new_key, sep=sep).items())
            else:
                items.append((new_key, v))
        return dict(items)

    config = argparse.Namespace()
    with open(orig_cfg_file, "r") as yaml_file:
        try:
            cfg = yaml.load(yaml_file, Loader=yaml.FullLoader)

            flat_cfg = flatten_yaml_as_dict(cfg)
            for k, v in flat_cfg.items():
                setattr(config, k, v)
        except yaml.YAMLError as exc:
            logger.error("Error while loading config file: {}. Error message: {}".format(orig_cfg_file, str(exc)))
    return config
def get_mobilevitv2_config(task_name, orig_cfg_file):
    config = MobileViTV2Config()

    is_segmentation_model = False

    # dataset
    if task_name.startswith("imagenet1k_"):
        config.num_labels = 1000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-1k-id2label.json"
    elif task_name.startswith("imagenet21k_to_1k_"):
        config.num_labels = 21000
        if int(task_name.strip().split("_")[-1]) == 384:
            config.image_size = 384
        else:
            config.image_size = 256
        filename = "imagenet-22k-id2label.json"
    elif task_name.startswith("ade20k_"):
        config.num_labels = 151
        config.image_size = 512
        filename = "ade20k-id2label.json"
        is_segmentation_model = True
    elif task_name.startswith("voc_"):
        config.num_labels = 21
        config.image_size = 512
        filename = "pascal-voc-id2label.json"
        is_segmentation_model = True

    # orig_config
    orig_config = load_orig_config_file(orig_cfg_file)
    assert getattr(orig_config, "model.classification.name", -1) == "mobilevit_v2", "Invalid model"
    config.width_multiplier = getattr(orig_config, "model.classification.mitv2.width_multiplier", 1.0)
    assert (
        getattr(orig_config, "model.classification.mitv2.attn_norm_layer", -1) == "layer_norm_2d"
    ), "Norm layers other than layer_norm_2d is not supported"
    config.hidden_act = getattr(orig_config, "model.classification.activation.name", "swish")
    # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)

    if is_segmentation_model:
        config.output_stride = getattr(orig_config, "model.segmentation.output_stride", 16)
        if "_deeplabv3" in task_name:
            config.atrous_rates = getattr(orig_config, "model.segmentation.deeplabv3.aspp_rates", [12, 24, 36])
            config.aspp_out_channels = getattr(orig_config, "model.segmentation.deeplabv3.aspp_out_channels", 512)
            config.aspp_dropout_prob = getattr(orig_config, "model.segmentation.deeplabv3.aspp_dropout", 0.1)

    # id2label
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevitv2."

    rename_keys = []
    for k in state_dict.keys():
        if k[:8] == "encoder.":
            k_new = k[8:]
        else:
            k_new = k

        if ".block." in k:
            k_new = k_new.replace(".block.", ".")
        if ".conv." in k:
            k_new = k_new.replace(".conv.", ".convolution.")
        if ".norm." in k:
            k_new = k_new.replace(".norm.", ".normalization.")

        if "conv_1." in k:
            k_new = k_new.replace("conv_1.", f"{model_prefix}conv_stem.")
        for i in [1, 2]:
            if f"layer_{i}." in k:
                k_new = k_new.replace(f"layer_{i}.", f"{model_prefix}encoder.layer.{i-1}.layer.")
        if ".exp_1x1." in k:
            k_new = k_new.replace(".exp_1x1.", ".expand_1x1.")
        if ".red_1x1." in k:
            k_new = k_new.replace(".red_1x1.", ".reduce_1x1.")

        for i in [3, 4, 5]:
            if f"layer_{i}.0." in k:
                k_new = k_new.replace(f"layer_{i}.0.", f"{model_prefix}encoder.layer.{i-1}.downsampling_layer.")
            if f"layer_{i}.1.local_rep.0." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.0.", f"{model_prefix}encoder.layer.{i-1}.conv_kxk.")
            if f"layer_{i}.1.local_rep.1." in k:
                k_new = k_new.replace(f"layer_{i}.1.local_rep.1.", f"{model_prefix}encoder.layer.{i-1}.conv_1x1.")

        for i in [3, 4, 5]:
            if i == 3:
                j_in = [0, 1]
            elif i == 4:
                j_in = [0, 1, 2, 3]
            elif i == 5:
                j_in = [0, 1, 2]

            for j in j_in:
                if f"layer_{i}.1.global_rep.{j}." in k:
                    k_new = k_new.replace(
                        f"layer_{i}.1.global_rep.{j}.", f"{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}."
                    )
            if f"layer_{i}.1.global_rep.{j+1}." in k:
                k_new = k_new.replace(
                    f"layer_{i}.1.global_rep.{j+1}.", f"{model_prefix}encoder.layer.{i-1}.layernorm."
                )

            if f"layer_{i}.1.conv_proj." in k:
                k_new = k_new.replace(f"layer_{i}.1.conv_proj.", f"{model_prefix}encoder.layer.{i-1}.conv_projection.")

        if "pre_norm_attn.0." in k:
            k_new = k_new.replace("pre_norm_attn.0.", "layernorm_before.")
        if "pre_norm_attn.1." in k:
            k_new = k_new.replace("pre_norm_attn.1.", "attention.")
        if "pre_norm_ffn.0." in k:
            k_new = k_new.replace("pre_norm_ffn.0.", "layernorm_after.")
        if "pre_norm_ffn.1." in k:
            k_new = k_new.replace("pre_norm_ffn.1.", "ffn.conv1.")
        if "pre_norm_ffn.3." in k:
            k_new = k_new.replace("pre_norm_ffn.3.", "ffn.conv2.")
        if "classifier.1." in k:
            k_new = k_new.replace("classifier.1.", "classifier.")

        if "seg_head." in k:
            k_new = k_new.replace("seg_head.", "segmentation_head.")
        if ".aspp_layer." in k:
            k_new = k_new.replace(".aspp_layer.", ".")
        if ".aspp_pool." in k:
            k_new = k_new.replace(".aspp_pool.", ".")

        rename_keys.append((k, k_new))
    return rename_keys


def remove_unused_keys(state_dict):
    # remove keys that are not used by the converted model (e.g. the auxiliary segmentation head)
    keys_to_ignore = []
    for k in state_dict.keys():
        if k.startswith("seg_head.aux_head."):
            keys_to_ignore.append(k)
    for k in keys_to_ignore:
        state_dict.pop(k, None)


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_mobilevitv2_checkpoint(task_name, checkpoint_path, orig_config_path, pytorch_dump_folder_path):
    config = get_mobilevitv2_config(task_name, orig_config_path)

    # load original state_dict
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    # load huggingface model
    if task_name.startswith("ade20k_") or task_name.startswith("voc_"):
        model = MobileViTV2ForSemanticSegmentation(config).eval()
        base_model = False
    else:
        model = MobileViTV2ForImageClassification(config).eval()
        base_model = False

    # remove and rename some keys of the loaded original model
    state_dict = checkpoint
    remove_unused_keys(state_dict)
    rename_keys = create_rename_keys(state_dict, base_model=base_model)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load modified state_dict
    model.load_state_dict(state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)

    # verify classification model
    if task_name.startswith("imagenet"):
        logits = outputs.logits
        predicted_class_idx = logits.argmax(-1).item()
        print("Predicted class:", model.config.id2label[predicted_class_idx])
        if task_name.startswith("imagenet1k_256") and config.width_multiplier == 1.0:
            # expected_logits for base variant
            expected_logits = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01])
            assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {task_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task",
        default="imagenet1k_256",
        type=str,
        help=(
            "Name of the task on which the MobileViTV2 model you'd like to convert was trained. "
            """
                Classification (ImageNet-1k)
                    - MobileViTV2 (256x256) : imagenet1k_256
                    - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384
                    - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :
                      imagenet21k_to_1k_256
                    - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on
                      ImageNet-1k 384x384) : imagenet21k_to_1k_384
                Segmentation
                    - ADE20K Dataset : ade20k_deeplabv3
                    - Pascal VOC 2012 Dataset: voc_deeplabv3
            """
        ),
        choices=[
            "imagenet1k_256",
            "imagenet1k_384",
            "imagenet21k_to_1k_256",
            "imagenet21k_to_1k_384",
            "ade20k_deeplabv3",
            "voc_deeplabv3",
        ],
    )
    parser.add_argument(
        "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()

    convert_mobilevitv2_checkpoint(
        args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
    )
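# Example invocation (the script filename and all paths are hypothetical placeholders):
#
#   python convert_mobilevitv2_to_pytorch.py \
#       --task imagenet1k_256 \
#       --orig_checkpoint_path ./mobilevitv2-1.0.pt \
#       --orig_config_path ./mobilevitv2.yaml \
#       --pytorch_dump_folder_path ./mobilevitv2-1.0-imagenet1k-256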
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")

        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8

        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        filepath = os.path.join(self.tmpdirname, "file.npz")
        np.savez(filepath, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=filepath)
        processed_voice_preset = inputs["history_prompt"]

        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)

        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
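# A minimal usage sketch for BarkProcessor outside the test harness (the checkpoint id
# and voice preset come from the tests above; anything else is an assumption):
#
#   processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
#   inputs = processor(text="This is a test string", voice_preset="en_speaker_1")
#   # `inputs` now contains the tokenized text plus a "history_prompt" voice preset.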
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
from .camera import create_pan_cameras
from .pipeline_shap_e import ShapEPipeline
from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
from .renderer import (
BoundingBoxVolume,
ImportanceRaySampler,
MLPNeRFModelOutput,
MLPNeRSTFModel,
ShapEParamsProjModel,
ShapERenderer,
StratifiedRaySampler,
VoidNeRFModel,
)
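# A minimal usage sketch for the pipeline exported by this module (the checkpoint id
# and call arguments are assumptions based on the public Shap-E release, not taken
# from this file):
#
#   from diffusers import ShapEPipeline
#
#   pipe = ShapEPipeline.from_pretrained("openai/shap-e")
#   frames = pipe("a shark", guidance_scale=15.0, num_inference_steps=64).images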
"""simple docstring"""
import re
import string
import numpy as np
import datasets
SCREAMING_SNAKE_CASE_ = '\nReturns the rate at which the input predicted strings exactly match their references, ignoring any strings input as part of the regexes_to_ignore list.\n'
SCREAMING_SNAKE_CASE_ = '\nArgs:\n predictions: List of predicted texts.\n references: List of reference texts.\n regexes_to_ignore: List, defaults to None. Regex expressions of characters to\n ignore when calculating the exact matches. Note: these regexes are removed\n from the input data before the changes based on the options below (e.g. ignore_case,\n ignore_punctuation, ignore_numbers) are applied.\n ignore_case: Boolean, defaults to False. If true, turns everything\n to lowercase so that capitalization differences are ignored.\n ignore_punctuation: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\n ignore_numbers: Boolean, defaults to False. If true, removes all punctuation before\n comparing predictions and references.\nReturns:\n exact_match: Dictionary containing exact_match rate. Possible values are between 0.0 and 100.0, inclusive.\nExamples:\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 25.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 50.0\n\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True)\n >>> print(round(results["exact_match"], 1))\n 75.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["the cat", "theater", "YELLING", "agent007"]\n >>> preds = ["cat?", "theater", "yelling", "agent"]\n >>> results = exact_match.compute(references=refs, predictions=preds, regexes_to_ignore=["the ", "yell", "YELL"], ignore_case=True, ignore_punctuation=True, ignore_numbers=True)\n >>> print(round(results["exact_match"], 1))\n 100.0\n\n >>> exact_match = datasets.load_metric("exact_match")\n >>> refs = ["The cat sat on the mat.", "Theaters are great.", "It\'s like comparing oranges and apples."]\n >>> preds = ["The cat sat on the mat?", "Theaters are great.", "It\'s like comparing apples and oranges."]\n >>> results = exact_match.compute(references=refs, predictions=preds)\n >>> print(round(results["exact_match"], 1))\n 33.3\n\n'
SCREAMING_SNAKE_CASE_ = '\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
"""simple docstring"""
def UpperCAmelCase__ ( self) -> str:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence'''),
'''references''': datasets.Value('''string''' , id='''sequence'''),
}) , reference_urls=[] , )
def UpperCAmelCase__ ( self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_=None , lowerCamelCase_=False , lowerCamelCase_=False , lowerCamelCase_=False , ) -> Any:
if regexes_to_ignore is not None:
for s in regexes_to_ignore:
UpperCamelCase = np.array([re.sub(lowerCamelCase_ , '''''' , lowerCamelCase_) for x in predictions])
UpperCamelCase = np.array([re.sub(lowerCamelCase_ , '''''' , lowerCamelCase_) for x in references])
else:
UpperCamelCase = np.asarray(lowerCamelCase_)
UpperCamelCase = np.asarray(lowerCamelCase_)
if ignore_case:
UpperCamelCase = np.char.lower(lowerCamelCase_)
UpperCamelCase = np.char.lower(lowerCamelCase_)
if ignore_punctuation:
UpperCamelCase = string.punctuation.maketrans('''''' , '''''' , string.punctuation)
UpperCamelCase = np.char.translate(lowerCamelCase_ , table=lowerCamelCase_)
UpperCamelCase = np.char.translate(lowerCamelCase_ , table=lowerCamelCase_)
if ignore_numbers:
UpperCamelCase = string.digits.maketrans('''''' , '''''' , string.digits)
UpperCamelCase = np.char.translate(lowerCamelCase_ , table=lowerCamelCase_)
UpperCamelCase = np.char.translate(lowerCamelCase_ , table=lowerCamelCase_)
UpperCamelCase = predictions == references
return {"exact_match": np.mean(lowerCamelCase_) * 1_0_0} | 34 |
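# Worked example of the normalization order (regexes are removed first, then case
# folding, then punctuation/digit stripping): with regexes_to_ignore=["the "],
# ignore_case=True and ignore_punctuation=True, the pair ("the cat!", "the Cat")
# is reduced to ("cat", "cat") and therefore counts as an exact match.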
import argparse
from collections import defaultdict

import yaml

PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """
    Cleans the model documentation table of contents: merges duplicate entries and sorts models alphabetically.
    """
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
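# Typical invocations (the script path is an assumption based on similar repo utilities):
#
#   python utils/check_doc_toc.py                      # fail if the model ToC is unsorted
#   python utils/check_doc_toc.py --fix_and_overwrite  # rewrite docs/source/en/_toctree.yml in place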
from __future__ import annotations

import matplotlib.pyplot as plt  # type: ignore
import numpy

# initial triangle of Koch snowflake
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])
VECTOR_3 = numpy.array([1, 0])
INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]


def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply `iteration_step` the given number of times to grow the snowflake boundary."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors


def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """
    Replace every segment between two adjacent vectors by four shorter segments:
    the middle vertex is obtained by rotating one third of the segment by 60
    degrees, which bends the line outwards (the Koch 'bump').
    """
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors


def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2D vector counterclockwise using the standard rotation matrix [[c, -s], [s, c]]."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)


def plot(vectors: list[numpy.ndarray]) -> None:
    """Plot the boundary described by the vector list."""
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
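# Each iteration replaces every segment with four shorter ones, so after n steps the
# initial 3 segments become 3 * 4**n; the 5 iterations above therefore plot a boundary
# made of 3 * 4**5 = 3072 segments.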
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop("PROCESS_TRAIN", "false")
CATEGORY_MAPPING = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def __magic_name__ ( lowercase_ ) -> List[Any]:
'''simple docstring'''
def choose_first(lowercase_ , lowercase_=False ):
assert isinstance(lowercase_ , lowercase_ )
if len(lowercase_ ) == 1:
UpperCamelCase = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
UpperCamelCase = {k: [a[k]] for k in a}
if len(a["start_token"] ) > 0:
break
return a
UpperCamelCase = {"id": example["id"]}
UpperCamelCase = example["annotations"]
UpperCamelCase = annotation["yes_no_answer"]
if 0 in yes_no_answer or 1 in yes_no_answer:
UpperCamelCase = ["yes"] if 1 in yes_no_answer else ["no"]
UpperCamelCase = UpperCamelCase = []
UpperCamelCase = UpperCamelCase = []
UpperCamelCase = ["<cls>"]
else:
UpperCamelCase = ["short"]
UpperCamelCase = choose_first(annotation["short_answers"] )
if len(out["start_token"] ) == 0:
# answer will be long if short is not available
UpperCamelCase = ["long"]
UpperCamelCase = choose_first(annotation["long_answer"] , is_long_answer=lowercase_ )
UpperCamelCase = []
answer.update(lowercase_ )
# disregard some samples
if len(answer["start_token"] ) > 1 or answer["start_token"] == answer["end_token"]:
UpperCamelCase = True
else:
UpperCamelCase = False
UpperCamelCase = ["start_token", "end_token", "start_byte", "end_byte", "text"]
if not all(isinstance(answer[k] , lowercase_ ) for k in cols ):
raise ValueError("Issue in ID" , example["id"] )
return answer
def __magic_name__ ( lowercase_ , lowercase_=False ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase = _get_single_answer(lowercase_ )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
UpperCamelCase = example["document"]["tokens"]
UpperCamelCase = []
for i in range(len(doc["token"] ) ):
if not doc["is_html"][i]:
context.append(doc["token"][i] )
return {
"context": " ".join(lowercase_ ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
    # later, this helps in removing all samples that have no answer
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
UpperCamelCase = ["start_token", "end_token"]
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
UpperCamelCase = example["document"]["tokens"]
UpperCamelCase = answer["start_token"]
UpperCamelCase = answer["end_token"]
UpperCamelCase = []
for i in range(len(doc["token"] ) ):
if not doc["is_html"][i]:
context.append(doc["token"][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
UpperCamelCase = " ".join(context[start_token:end_token] )
# checking above code
if assertion:
UpperCamelCase = doc["is_html"][answer["start_token"] : answer["end_token"]]
UpperCamelCase = doc["token"][answer["start_token"] : answer["end_token"]]
UpperCamelCase = " ".join([old[i] for i in range(len(lowercase_ ) ) if not is_html[i]] )
if new != old:
print("ID:" , example["id"] )
print("New:" , lowercase_ , end="\n" )
print("Old:" , lowercase_ , end="\n\n" )
return {
"context": " ".join(lowercase_ ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def __magic_name__ ( lowercase_ , lowercase_ , lowercase_=2048 , lowercase_=4096 , lowercase_=True ) -> int:
'''simple docstring'''
UpperCamelCase = get_context_and_ans(lowercase_ , assertion=lowercase_ )
UpperCamelCase = out["answer"]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
UpperCamelCase = tokenizer(example["question"]["text"] , out["context"] ).input_ids
UpperCamelCase = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = input_ids[:q_len]
UpperCamelCase = range(lowercase_ , len(lowercase_ ) , max_length - doc_stride )
for i in doc_start_indices:
UpperCamelCase = i + max_length - q_len
UpperCamelCase = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer["category"][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(lowercase_ ),
"end_token": [-100] * len(lowercase_ ),
"category": category,
},
}
UpperCamelCase = out["context"].split()
UpperCamelCase = splitted_context[answer["end_token"]]
UpperCamelCase = len(
tokenizer(
" ".join(splitted_context[: answer["start_token"]] ) , add_special_tokens=lowercase_ , ).input_ids )
UpperCamelCase = len(
tokenizer(" ".join(splitted_context[: answer["end_token"]] ) , add_special_tokens=lowercase_ ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
UpperCamelCase = len(tokenizer(lowercase_ , add_special_tokens=lowercase_ ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
UpperCamelCase = input_ids[answer["start_token"] : answer["end_token"] + 1] # right & left are inclusive
UpperCamelCase = answer["start_token"]
UpperCamelCase = answer["end_token"]
if assertion:
UpperCamelCase = tokenizer.decode(lowercase_ )
if answer["span"] != new:
print("ISSUE IN TOKENIZATION" )
print("OLD:" , answer["span"] )
print("NEW:" , lowercase_ , end="\n\n" )
if len(lowercase_ ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
UpperCamelCase = input_ids[:q_len]
UpperCamelCase = range(lowercase_ , len(lowercase_ ) , max_length - doc_stride )
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = []
UpperCamelCase = [] # null, yes, no, long, short
for i in doc_start_indices:
UpperCamelCase = i + max_length - q_len
UpperCamelCase = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
UpperCamelCase = start_token - i + q_len
UpperCamelCase = end_token - i + q_len
answers_category.append(answer["category"][0] ) # ["short"] -> "short"
else:
UpperCamelCase = -100
UpperCamelCase = -100
answers_category.append("null" )
UpperCamelCase = inputs[-1][start_token : end_token + 1]
answers_start_token.append(lowercase_ )
answers_end_token.append(lowercase_ )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print("ISSUE in strided for ID:" , example["id"] )
print("New:" , tokenizer.decode(lowercase_ ) )
print("Old:" , tokenizer.decode(lowercase_ ) , end="\n\n" )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def __magic_name__ ( lowercase_ , lowercase_ , lowercase_=2048 , lowercase_=4096 , lowercase_=False ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase = get_strided_contexts_and_ans(
lowercase_ , lowercase_ , doc_stride=lowercase_ , max_length=lowercase_ , assertion=lowercase_ , )
return example
def __magic_name__ ( lowercase_ , lowercase_ ) -> Any:
'''simple docstring'''
with jsonlines.open(lowercase_ , "a" ) as writer:
for example in tqdm(lowercase_ , total=len(lowercase_ ) , desc="Saving samples ... " ):
UpperCamelCase = example["labels"]
for ids, start, end, cat in zip(
example["input_ids"] , labels["start_token"] , labels["end_token"] , labels["category"] , ):
                if start == -1 and end == -1:
                    continue  # skip samples that have no answer
                if cat == "null" and np.random.rand() < 0.6:
                    continue  # drop roughly 60% of the "null" (no-answer) samples
writer.write(
{
"input_ids": ids,
"start_token": start,
"end_token": end,
"category": CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
__a : Dict = load_dataset("""natural_questions""")
__a : int = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""")
__a : List[Any] = data["""train""" if PROCESS_TRAIN == """true""" else """validation"""]
__a : Tuple = {
"""tokenizer""": tokenizer,
"""doc_stride""": DOC_STRIDE,
"""max_length""": MAX_LENGTH,
"""assertion""": False,
}
__a : int = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
__a : Optional[int] = data.remove_columns(["""annotations""", """document""", """id""", """question"""])
print(data)
np.random.seed(SEED)
__a : List[Any] = """nq-training.jsonl""" if PROCESS_TRAIN == """true""" else """nq-validation.jsonl"""
save_to_disk(data, file_name=cache_file_name)
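# Example invocation (the script filename is a hypothetical placeholder; the
# PROCESS_TRAIN variable and output filenames come from this module):
#
#   PROCESS_TRAIN=true  python prepare_natural_questions.py   # writes nq-training.jsonl
#   PROCESS_TRAIN=false python prepare_natural_questions.py   # writes nq-validation.jsonl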
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """
        Computes the expected height and width after resizing with a
        shortest_edge size (aspect ratio is preserved).
        """
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
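# Worked example for get_expected_values with size={"shortest_edge": 18}: a 30x40
# (h x w) image is resized so the short side becomes 18, giving height 18 and
# width int(18 * 40 / 30) = 24; the aspect ratio is preserved.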
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict)
# create random PIL images
SCREAMING_SNAKE_CASE_ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_)
for image in image_inputs:
self.assertIsInstance(lowercase_ , Image.Image)
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.image_processor_tester.get_expected_values(lowercase_)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_)
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_processing(lowercase_ , return_tensors='''pt''').pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy(self):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
SCREAMING_SNAKE_CASE_ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_)
for image in image_inputs:
self.assertIsInstance(lowercase_ , np.ndarray)
# Test not batched input
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = self.image_processor_tester.get_expected_values(lowercase_)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_ : List[str] = image_processing(lowercase_ , return_tensors='''pt''').pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[Any] = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_pytorch(self):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
SCREAMING_SNAKE_CASE_ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_)
for image in image_inputs:
self.assertIsInstance(lowercase_ , torch.Tensor)
# Test not batched input
SCREAMING_SNAKE_CASE_ : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''').pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = self.image_processor_tester.get_expected_values(lowercase_)
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_processing(lowercase_ , return_tensors='''pt''').pixel_values
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_)
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[int] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''') as f:
SCREAMING_SNAKE_CASE_ : int = json.loads(f.read())
SCREAMING_SNAKE_CASE_ : Any = {'''image_id''': 39769, '''annotations''': target}
# encode them
SCREAMING_SNAKE_CASE_ : Tuple = DeformableDetrImageProcessor()
SCREAMING_SNAKE_CASE_ : Dict = image_processing(images=lowercase_ , annotations=lowercase_ , return_tensors='''pt''')
# verify pixel values
SCREAMING_SNAKE_CASE_ : Tuple = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding['''pixel_values'''].shape , lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([0.27_96, 0.31_38, 0.34_81])
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , lowercase_ , atol=1e-4))
# verify area
SCREAMING_SNAKE_CASE_ : str = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , lowercase_))
# verify boxes
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.Size([6, 4])
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , lowercase_)
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , lowercase_ , atol=1e-3))
# verify image_id
SCREAMING_SNAKE_CASE_ : Dict = torch.tensor([39769])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , lowercase_))
# verify is_crowd
SCREAMING_SNAKE_CASE_ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , lowercase_))
# verify class_labels
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([75, 75, 63, 65, 17, 17])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , lowercase_))
# verify orig_size
SCREAMING_SNAKE_CASE_ : int = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , lowercase_))
# verify size
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , lowercase_))
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''') as f:
SCREAMING_SNAKE_CASE_ : Dict = json.loads(f.read())
SCREAMING_SNAKE_CASE_ : List[Any] = {'''file_name''': '''000000039769.png''', '''image_id''': 39769, '''segments_info''': target}
SCREAMING_SNAKE_CASE_ : Tuple = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''')
# encode them
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DeformableDetrImageProcessor(format='''coco_panoptic''')
SCREAMING_SNAKE_CASE_ : Dict = image_processing(images=lowercase_ , annotations=lowercase_ , masks_path=lowercase_ , return_tensors='''pt''')
# verify pixel values
SCREAMING_SNAKE_CASE_ : List[Any] = torch.Size([1, 3, 800, 1066])
self.assertEqual(encoding['''pixel_values'''].shape , lowercase_)
SCREAMING_SNAKE_CASE_ : Any = torch.tensor([0.27_96, 0.31_38, 0.34_81])
self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , lowercase_ , atol=1e-4))
# verify area
SCREAMING_SNAKE_CASE_ : str = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , lowercase_))
# verify boxes
SCREAMING_SNAKE_CASE_ : Tuple = torch.Size([6, 4])
self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , lowercase_)
SCREAMING_SNAKE_CASE_ : int = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , lowercase_ , atol=1e-3))
# verify image_id
SCREAMING_SNAKE_CASE_ : Any = torch.tensor([39769])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , lowercase_))
# verify is_crowd
SCREAMING_SNAKE_CASE_ : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , lowercase_))
# verify class_labels
SCREAMING_SNAKE_CASE_ : int = torch.tensor([17, 17, 63, 75, 75, 93])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , lowercase_))
# verify masks
SCREAMING_SNAKE_CASE_ : List[Any] = 822873
self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , lowercase_)
# verify orig_size
SCREAMING_SNAKE_CASE_ : List[str] = torch.tensor([480, 640])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , lowercase_))
# verify size
SCREAMING_SNAKE_CASE_ : int = torch.tensor([800, 1066])
self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , lowercase_))
| 512 |
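For reference, a minimal, untested sketch of the annotation contract the detection test above relies on; the image, ids, and box values are made up for illustration:

from PIL import Image
from transformers import DeformableDetrImageProcessor

image = Image.new("RGB", (640, 480))  # stand-in for the COCO fixture image
target = {
    "image_id": 1,
    "annotations": [
        # COCO detection entry: bbox is [x, y, width, height] in absolute pixels
        {"image_id": 1, "category_id": 17, "bbox": [10.0, 20.0, 100.0, 50.0], "area": 5000.0, "iscrowd": 0}
    ],
}

processor = DeformableDetrImageProcessor()
encoding = processor(images=image, annotations=target, return_tensors="pt")
print(encoding["pixel_values"].shape)  # resized and padded image batch
print(encoding["labels"][0]["boxes"])  # boxes converted to normalized (cx, cy, w, h)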
"""simple docstring"""
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Any , lowercase_ : str , lowercase_ : Dict=13 , lowercase_ : Dict=32 , lowercase_ : Any=3 , lowercase_ : Any=4 , lowercase_ : int=[10, 20, 30, 40] , lowercase_ : Union[str, Any]=[2, 2, 3, 2] , lowercase_ : Optional[int]=True , lowercase_ : Union[str, Any]=True , lowercase_ : Dict=37 , lowercase_ : Optional[Any]="gelu" , lowercase_ : Union[str, Any]=10 , lowercase_ : Optional[int]=0.02 , lowercase_ : Tuple=["stage2", "stage3", "stage4"] , lowercase_ : Optional[Any]=[2, 3, 4] , lowercase_ : Optional[int]=None , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = parent
SCREAMING_SNAKE_CASE_ : Any = batch_size
SCREAMING_SNAKE_CASE_ : List[Any] = image_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE_ : List[Any] = num_stages
SCREAMING_SNAKE_CASE_ : List[str] = hidden_sizes
SCREAMING_SNAKE_CASE_ : Optional[int] = depths
SCREAMING_SNAKE_CASE_ : str = is_training
SCREAMING_SNAKE_CASE_ : int = use_labels
SCREAMING_SNAKE_CASE_ : Dict = intermediate_size
SCREAMING_SNAKE_CASE_ : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE_ : Optional[Any] = num_labels
SCREAMING_SNAKE_CASE_ : List[Any] = initializer_range
SCREAMING_SNAKE_CASE_ : Dict = out_features
SCREAMING_SNAKE_CASE_ : List[str] = out_indices
SCREAMING_SNAKE_CASE_ : int = scope
def _SCREAMING_SNAKE_CASE ( self : Tuple):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
SCREAMING_SNAKE_CASE_ : Any = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels)
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=lowercase_ , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = ConvNextModel(config=lowercase_)
model.to(lowercase_)
model.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(lowercase_)
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowercase_ : str , lowercase_ : Dict , lowercase_ : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = ConvNextForImageClassification(lowercase_)
model.to(lowercase_)
model.eval()
SCREAMING_SNAKE_CASE_ : List[Any] = model(lowercase_ , labels=lowercase_)
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))
def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Any , lowercase_ : Dict):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : int = ConvNextBackbone(config=lowercase_)
model.to(lowercase_)
model.eval()
SCREAMING_SNAKE_CASE_ : Any = model(lowercase_)
# verify hidden states
self.parent.assertEqual(len(result.feature_maps) , len(config.out_features))
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[1], 4, 4])
# verify channels
self.parent.assertEqual(len(model.channels) , len(config.out_features))
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:])
# verify backbone works with out_features=None
SCREAMING_SNAKE_CASE_ : Optional[Any] = None
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ConvNextBackbone(config=lowercase_)
model.to(lowercase_)
model.eval()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model(lowercase_)
# verify feature maps
self.parent.assertEqual(len(result.feature_maps) , 1)
self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.hidden_sizes[-1], 1, 1])
# verify channels
self.parent.assertEqual(len(model.channels) , 1)
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]])
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = config_and_inputs
SCREAMING_SNAKE_CASE_ : Dict = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
__UpperCamelCase = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
__UpperCamelCase = (
{"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
if is_torch_available()
else {}
)
__UpperCamelCase = True
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
__UpperCamelCase = False
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[str] = ConvNextModelTester(self)
SCREAMING_SNAKE_CASE_ : int = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
return
@unittest.skip(reason='''ConvNext does not use inputs_embeds''')
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNext does not support input and output embeddings''')
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
pass
@unittest.skip(reason='''ConvNext does not use feedforward chunking''')
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
pass
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Dict = model_class(lowercase_)
SCREAMING_SNAKE_CASE_ : int = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ : List[str] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_)
def _SCREAMING_SNAKE_CASE ( self : str):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
def check_hidden_states_output(lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : str):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = model_class(lowercase_)
model.to(lowercase_)
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_))
SCREAMING_SNAKE_CASE_ : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.num_stages
self.assertEqual(len(lowercase_) , expected_num_stages + 1)
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:]) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Tuple = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_ : str = True
check_hidden_states_output(lowercase_ , lowercase_ , lowercase_)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_)
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : int = ConvNextModel.from_pretrained(lowercase_)
self.assertIsNotNone(lowercase_)
def _A () -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : List[str]):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('''facebook/convnext-tiny-224''') if is_vision_available() else None
@slow
def _SCREAMING_SNAKE_CASE ( self : int):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = ConvNextForImageClassification.from_pretrained('''facebook/convnext-tiny-224''').to(lowercase_)
SCREAMING_SNAKE_CASE_ : str = self.default_image_processor
SCREAMING_SNAKE_CASE_ : Union[str, Any] = prepare_img()
SCREAMING_SNAKE_CASE_ : List[Any] = image_processor(images=lowercase_ , return_tensors='''pt''').to(lowercase_)
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : str = model(**lowercase_)
# verify the logits
SCREAMING_SNAKE_CASE_ : List[Any] = torch.Size((1, 1000))
self.assertEqual(outputs.logits.shape , lowercase_)
SCREAMING_SNAKE_CASE_ : int = torch.tensor([-0.02_60, -0.47_39, 0.19_11]).to(lowercase_)
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1e-4))
@require_torch
class lowerCAmelCase__ ( unittest.TestCase , UpperCAmelCase__ ):
'''simple docstring'''
__UpperCamelCase = (ConvNextBackbone,) if is_torch_available() else ()
__UpperCamelCase = ConvNextConfig
__UpperCamelCase = False
def _SCREAMING_SNAKE_CASE ( self : Optional[int]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = ConvNextModelTester(self)
| 512 | 1 |
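The integration test above boils down to the standard classification flow; a short sketch of it (the image path is a placeholder):

import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/convnext-tiny-224")
model = ConvNextForImageClassification.from_pretrained("facebook/convnext-tiny-224")

image = Image.open("path/to/image.png")  # any RGB image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])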
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 39 |
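The point of the `_LazyModule` indirection above is that importing the package stays cheap; the torch-backed submodule is only imported on first attribute access. A rough sketch of that behavior (assuming torch is installed):

from transformers.models import timesformer

# nothing heavy has run yet; this attribute lookup triggers the real submodule import
config = timesformer.TimesformerConfig()
print(config.num_frames)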
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("download")
        download_parser.add_argument(
            "--cache-dir", type=str, default=None, help="Path to location to store the models"
        )
        download_parser.add_argument(
            "--force", action="store_true", help="Force the model to be downloaded even if already in cache-dir"
        )
        download_parser.add_argument(
            "--trust-remote-code",
            action="store_true",
            help=(
                "Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only"
                " if you've reviewed the code as it will execute on your local machine"
            ),
        )
        download_parser.add_argument("model", type=str, help="Name of the model to download")
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
| 39 | 1 |
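A sketch of driving the command programmatically, equivalent to `transformers-cli download bert-base-uncased --cache-dir ./hf-cache`; the Namespace fields mirror the argparse options registered above:

from argparse import Namespace

from transformers.commands.download import download_command_factory

args = Namespace(model="bert-base-uncased", cache_dir="./hf-cache", force=False, trust_remote_code=False)
download_command_factory(args).run()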
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    """
    Finds a root of `function` in [a, b] using the bisection method (Bolzano's theorem).
    Requires function(a) and function(b) to have opposite signs.
    """
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until the bracket is narrower than 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid


def f(x: float) -> float:
    return x**3 - 2 * x - 5


if __name__ == "__main__":
    print(bisection(f, 1, 1000))

    import doctest

    doctest.testmod()
| 97 |
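Because the routine only works on a sign change, both branches are worth exercising; a quick usage sketch assuming the definitions above are in scope:

from math import cos

print(bisection(cos, 0, 3))  # ~1.5707963, the root of cos(x) in [0, 3]

try:
    bisection(lambda x: x * x + 1, -1, 1)  # always positive, so no bracketed root
except ValueError as err:
    print(err)  # could not find root in given interval.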
"""simple docstring"""
import warnings
from .generation import TFGenerationMixin
class TFGenerationMixin(TFGenerationMixin):
    # warning at import time
    warnings.warn(
        "Importing `TFGenerationMixin` from `src/transformers/generation_tf_utils.py` is deprecated and will "
        "be removed in Transformers v5. Import as `from transformers import TFGenerationMixin` instead.",
        FutureWarning,
    )
| 437 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_A = logging.get_logger(__name__)
_A = {
'post_extract_proj': 'feature_projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.upsample.0': 'encoder.upsample.projection',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def UpperCAmelCase ( a_, a_, a_ ):
'''simple docstring'''
lowerCamelCase : List[Any] = []
lowerCamelCase : str = fairseq_model.state_dict()
lowerCamelCase : Dict = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
lowerCamelCase : int = False
if "conv_layers" in name:
load_conv_layer(
a_, a_, a_, a_, hf_model.config.feat_extract_norm == 'group', )
lowerCamelCase : Tuple = True
else:
for key, mapped_key in MAPPING.items():
lowerCamelCase : Optional[int] = 'sew.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
lowerCamelCase : List[str] = True
if "*" in mapped_key:
lowerCamelCase : Union[str, Any] = name.split(a_ )[0].split('.' )[-2]
lowerCamelCase : Any = mapped_key.replace('*', a_ )
if "weight_g" in name:
lowerCamelCase : Tuple = 'weight_g'
elif "weight_v" in name:
lowerCamelCase : Dict = 'weight_v'
elif "weight" in name:
lowerCamelCase : Dict = 'weight'
elif "bias" in name:
lowerCamelCase : List[str] = 'bias'
else:
lowerCamelCase : Optional[int] = None
set_recursively(a_, a_, a_, a_, a_ )
continue
if not is_used:
unused_weights.append(a_ )
logger.warning(F"""Unused weights: {unused_weights}""" )
def UpperCAmelCase ( a_, a_, a_, a_, a_ ):
'''simple docstring'''
lowerCamelCase : str = full_name.split('conv_layers.' )[-1]
lowerCamelCase : Optional[Any] = name.split('.' )
lowerCamelCase : Optional[int] = int(items[0] )
lowerCamelCase : Optional[Any] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
lowerCamelCase : int = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
lowerCamelCase : Optional[Any] = value
logger.info(F"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
lowerCamelCase : Any = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"""{full_name} has size {value.shape}, but"""
F""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
lowerCamelCase : Optional[int] = value
logger.info(F"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(a_ )
def UpperCAmelCase ( a_, a_ ):
'''simple docstring'''
lowerCamelCase : List[Any] = SEWConfig()
if is_finetuned:
lowerCamelCase : Union[str, Any] = model.wav_encoder.wav_model.cfg
else:
lowerCamelCase : str = model.cfg
lowerCamelCase : Any = fs_config.conv_bias
lowerCamelCase : Any = eval(fs_config.conv_feature_layers )
lowerCamelCase : Any = [x[0] for x in conv_layers]
lowerCamelCase : Union[str, Any] = [x[1] for x in conv_layers]
lowerCamelCase : Optional[Any] = [x[2] for x in conv_layers]
lowerCamelCase : str = 'gelu'
lowerCamelCase : str = 'layer' if fs_config.extractor_mode == 'layer_norm' else 'group'
lowerCamelCase : Optional[int] = 0.0
lowerCamelCase : Optional[Any] = fs_config.activation_fn.name
lowerCamelCase : Union[str, Any] = fs_config.encoder_embed_dim
lowerCamelCase : Any = 0.0_2
lowerCamelCase : Dict = fs_config.encoder_ffn_embed_dim
lowerCamelCase : List[str] = 1E-5
lowerCamelCase : Optional[int] = fs_config.encoder_layerdrop
lowerCamelCase : Union[str, Any] = fs_config.encoder_attention_heads
lowerCamelCase : Optional[int] = fs_config.conv_pos_groups
lowerCamelCase : List[str] = fs_config.conv_pos
lowerCamelCase : Dict = len(a_ )
lowerCamelCase : List[Any] = fs_config.encoder_layers
lowerCamelCase : Dict = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
lowerCamelCase : Tuple = model.cfg
lowerCamelCase : Union[str, Any] = fs_config.final_dropout
lowerCamelCase : str = fs_config.layerdrop
lowerCamelCase : str = fs_config.activation_dropout
lowerCamelCase : int = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
lowerCamelCase : str = fs_config.attention_dropout
lowerCamelCase : Optional[int] = fs_config.dropout_input
lowerCamelCase : Dict = fs_config.dropout
lowerCamelCase : Tuple = fs_config.mask_channel_length
lowerCamelCase : List[Any] = fs_config.mask_channel_prob
lowerCamelCase : Any = fs_config.mask_length
lowerCamelCase : List[str] = fs_config.mask_prob
lowerCamelCase : int = 'Wav2Vec2FeatureExtractor'
lowerCamelCase : str = 'Wav2Vec2CTCTokenizer'
return config
@torch.no_grad()
def UpperCAmelCase ( a_, a_, a_=None, a_=None, a_=True ):
'''simple docstring'''
if is_finetuned:
lowerCamelCase : Dict = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
lowerCamelCase : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
lowerCamelCase : Optional[Any] = SEWConfig.from_pretrained(a_ )
else:
lowerCamelCase : Any = convert_config(model[0], a_ )
lowerCamelCase : Tuple = model[0].eval()
lowerCamelCase : str = True if config.feat_extract_norm == 'layer' else False
lowerCamelCase : List[str] = WavaVecaFeatureExtractor(
feature_size=1, sampling_rate=1_6000, padding_value=0, do_normalize=a_, return_attention_mask=a_, )
if is_finetuned:
if dict_path:
lowerCamelCase : Dict = Dictionary.load(a_ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowerCamelCase : List[str] = target_dict.pad_index
lowerCamelCase : List[Any] = target_dict.bos_index
lowerCamelCase : Any = target_dict.pad_index
lowerCamelCase : List[str] = target_dict.bos_index
lowerCamelCase : Dict = target_dict.eos_index
lowerCamelCase : Optional[Any] = len(target_dict.symbols )
lowerCamelCase : List[Any] = os.path.join(a_, 'vocab.json' )
if not os.path.isdir(a_ ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(a_ ) )
return
os.makedirs(a_, exist_ok=a_ )
with open(a_, 'w', encoding='utf-8' ) as vocab_handle:
json.dump(target_dict.indices, a_ )
lowerCamelCase : int = WavaVecaCTCTokenizer(
a_, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='|', do_lower_case=a_, )
lowerCamelCase : Tuple = WavaVecaProcessor(feature_extractor=a_, tokenizer=a_ )
processor.save_pretrained(a_ )
lowerCamelCase : Any = SEWForCTC(a_ )
else:
lowerCamelCase : Tuple = SEWModel(a_ )
feature_extractor.save_pretrained(a_ )
recursively_load_weights(a_, a_, a_ )
hf_model.save_pretrained(a_ )
if __name__ == "__main__":
_A = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--is_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
_A = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 701 |
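A typical invocation of the script above, built only from the flags it registers; the script filename and all paths are placeholders:

import subprocess

# convert a CTC-finetuned SEW checkpoint; drop --is_finetuned and --dict_path for a pretrained-only model
subprocess.run(
    [
        "python", "convert_sew_checkpoint.py",  # placeholder name for the conversion script above
        "--checkpoint_path", "/path/to/sew_checkpoint.pt",
        "--pytorch_dump_folder_path", "/path/to/hf_output",
        "--dict_path", "/path/to/dict.ltr.txt",
        "--is_finetuned",
    ],
    check=True,
)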
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
_A = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class _lowercase ( __UpperCAmelCase , unittest.TestCase ):
lowercase_ = MBartaaTokenizer
lowercase_ = MBartaaTokenizerFast
lowercase_ = True
lowercase_ = True
def _UpperCamelCase ( self ) -> Dict:
super().setUp()
# We have a SentencePiece fixture for testing
lowerCamelCase : int = MBartaaTokenizer(UpperCAmelCase_ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=UpperCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def _UpperCamelCase ( self ) -> Tuple:
lowerCamelCase : str = '<s>'
lowerCamelCase : Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCAmelCase_ ) , UpperCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCAmelCase_ ) , UpperCAmelCase_ )
def _UpperCamelCase ( self ) -> List[Any]:
lowerCamelCase : Optional[int] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(UpperCAmelCase_ ) , 1054 )
def _UpperCamelCase ( self ) -> Optional[int]:
self.assertEqual(self.get_tokenizer().vocab_size , 1054 )
def _UpperCamelCase ( self ) -> str:
lowerCamelCase : Optional[int] = MBartaaTokenizer(UpperCAmelCase_ , src_lang='en_XX' , tgt_lang='ro_RO' , keep_accents=UpperCAmelCase_ )
lowerCamelCase : Dict = tokenizer.tokenize('This is a test' )
self.assertListEqual(UpperCAmelCase_ , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(UpperCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
lowerCamelCase : Any = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
UpperCAmelCase_ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.'] , )
lowerCamelCase : Dict = tokenizer.convert_tokens_to_ids(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
lowerCamelCase : List[Any] = tokenizer.convert_ids_to_tokens(UpperCAmelCase_ )
self.assertListEqual(
UpperCAmelCase_ , [SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.'] , )
@slow
def _UpperCamelCase ( self ) -> List[Any]:
# fmt: off
lowerCamelCase : Optional[Any] = {'input_ids': [[250004, 11062, 82772, 7, 15, 82772, 538, 51529, 237, 17198, 1290, 206, 9, 215175, 1314, 136, 17198, 1290, 206, 9, 56359, 42, 122009, 9, 16466, 16, 87344, 4537, 9, 4717, 78381, 6, 159958, 7, 15, 24480, 618, 4, 527, 22693, 5428, 4, 2777, 24480, 9874, 4, 43523, 594, 4, 803, 18392, 33189, 18, 4, 43523, 24447, 12399, 100, 24955, 83658, 9626, 144057, 15, 839, 22335, 16, 136, 24955, 83658, 83479, 15, 39102, 724, 16, 678, 645, 2789, 1328, 4589, 42, 122009, 115774, 23, 805, 1328, 46876, 7, 136, 53894, 1940, 42227, 41159, 17721, 823, 425, 4, 27512, 98722, 206, 136, 5531, 4970, 919, 17336, 5, 2], [250004, 20080, 618, 83, 82775, 47, 479, 9, 1517, 73, 53894, 333, 80581, 110117, 18811, 5256, 1295, 51, 152526, 297, 7986, 390, 124416, 538, 35431, 214, 98, 15044, 25737, 136, 7108, 43701, 23, 756, 135355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250004, 581, 63773, 119455, 6, 147797, 88203, 7, 645, 70, 21, 3285, 10269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=UpperCAmelCase_ , model_name='facebook/mbart-large-50' , revision='d3913889c59cd5c9e456b269c376325eabad57e2' , )
def _UpperCamelCase ( self ) -> List[str]:
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
lowerCamelCase : int = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart50', {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCamelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCamelCase : Tuple = self.tokenizer_class.from_pretrained(UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCamelCase : Tuple = tempfile.mkdtemp()
lowerCamelCase : Any = tokenizer_r.save_pretrained(UpperCAmelCase_ )
lowerCamelCase : List[str] = tokenizer_p.save_pretrained(UpperCAmelCase_ )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
lowerCamelCase : int = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f )
self.assertSequenceEqual(UpperCAmelCase_ , UpperCAmelCase_ )
# Checks everything loads correctly in the same way
lowerCamelCase : Dict = tokenizer_r.from_pretrained(UpperCAmelCase_ )
lowerCamelCase : Union[str, Any] = tokenizer_p.from_pretrained(UpperCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(UpperCAmelCase_ )
# Save tokenizer rust, legacy_format=True
lowerCamelCase : Optional[int] = tempfile.mkdtemp()
lowerCamelCase : Union[str, Any] = tokenizer_r.save_pretrained(UpperCAmelCase_ , legacy_format=UpperCAmelCase_ )
lowerCamelCase : List[Any] = tokenizer_p.save_pretrained(UpperCAmelCase_ )
# Checks it save with the same files
self.assertSequenceEqual(UpperCAmelCase_ , UpperCAmelCase_ )
# Checks everything loads correctly in the same way
lowerCamelCase : Dict = tokenizer_r.from_pretrained(UpperCAmelCase_ )
lowerCamelCase : Optional[int] = tokenizer_p.from_pretrained(UpperCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
shutil.rmtree(UpperCAmelCase_ )
# Save tokenizer rust, legacy_format=False
lowerCamelCase : Union[str, Any] = tempfile.mkdtemp()
lowerCamelCase : List[str] = tokenizer_r.save_pretrained(UpperCAmelCase_ , legacy_format=UpperCAmelCase_ )
lowerCamelCase : Optional[int] = tokenizer_p.save_pretrained(UpperCAmelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
lowerCamelCase : int = tokenizer_r.from_pretrained(UpperCAmelCase_ )
lowerCamelCase : Union[str, Any] = tokenizer_p.from_pretrained(UpperCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(UpperCAmelCase_ , UpperCAmelCase_ ) )
shutil.rmtree(UpperCAmelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowercase ( unittest.TestCase ):
lowercase_ = 'facebook/mbart-large-50-one-to-many-mmt'
lowercase_ = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
lowercase_ = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
lowercase_ = [EN_CODE, 8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2]
@classmethod
def _UpperCamelCase ( cls ) -> int:
lowerCamelCase : MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang='en_XX' , tgt_lang='ro_RO' )
lowerCamelCase : Union[str, Any] = 1
return cls
def _UpperCamelCase ( self ) -> int:
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'] , 250001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'] , 250004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'] , 250020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['mr_IN'] , 250038 )
def _UpperCamelCase ( self ) -> Optional[Any]:
lowerCamelCase : List[str] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase_ )
def _UpperCamelCase ( self ) -> Dict:
self.assertIn(UpperCAmelCase_ , self.tokenizer.all_special_ids )
lowerCamelCase : str = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
lowerCamelCase : Union[str, Any] = self.tokenizer.decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
lowerCamelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=UpperCAmelCase_ )
self.assertEqual(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertNotIn(self.tokenizer.eos_token , UpperCAmelCase_ )
def _UpperCamelCase ( self ) -> Dict:
lowerCamelCase : Tuple = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , UpperCAmelCase_ )
lowerCamelCase : str = 10
lowerCamelCase : Optional[int] = self.tokenizer(UpperCAmelCase_ , max_length=UpperCAmelCase_ , truncation=UpperCAmelCase_ ).input_ids[0]
self.assertEqual(ids[0] , UpperCAmelCase_ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(UpperCAmelCase_ ) , UpperCAmelCase_ )
def _UpperCamelCase ( self ) -> List[str]:
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR'] ) , [250053, 250001] )
def _UpperCamelCase ( self ) -> Optional[Any]:
lowerCamelCase : List[str] = tempfile.mkdtemp()
lowerCamelCase : Any = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(UpperCAmelCase_ )
lowerCamelCase : str = MBartaaTokenizer.from_pretrained(UpperCAmelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , UpperCAmelCase_ )
@require_torch
def _UpperCamelCase ( self ) -> Any:
lowerCamelCase : Tuple = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase_ , return_tensors='pt' )
lowerCamelCase : Any = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def _UpperCamelCase ( self ) -> Optional[int]:
lowerCamelCase : int = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors='pt' , )
lowerCamelCase : Dict = shift_tokens_right(batch['labels'] , self.tokenizer.pad_token_id )
self.assertIsInstance(UpperCAmelCase_ , UpperCAmelCase_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
lowerCamelCase : Dict = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , UpperCAmelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def _UpperCamelCase ( self ) -> Union[str, Any]:
lowerCamelCase : Optional[Any] = self.tokenizer(self.src_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=3 , return_tensors='pt' )
lowerCamelCase : Optional[Any] = self.tokenizer(
text_target=self.tgt_text , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=10 , return_tensors='pt' )
lowerCamelCase : List[Any] = targets['input_ids']
lowerCamelCase : List[Any] = shift_tokens_right(UpperCAmelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def _UpperCamelCase ( self ) -> List[str]:
lowerCamelCase : List[Any] = self.tokenizer._build_translation_inputs(
'A test' , return_tensors='pt' , src_lang='en_XX' , tgt_lang='ar_AR' )
self.assertEqual(
nested_simplify(UpperCAmelCase_ ) , {
# en_XX, A, test, EOS
'input_ids': [[250004, 62, 3034, 2]],
'attention_mask': [[1, 1, 1, 1]],
# ar_AR
'forced_bos_token_id': 250001,
} , )
| 133 | 0 |
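The language-code mechanics exercised above are what drive real translation; a short sketch of the documented one-to-many flow:

from transformers import MBart50TokenizerFast, MBartForConditionalGeneration

tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX")
model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-one-to-many-mmt")

inputs = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# force the decoder to start with the Romanian language code
generated = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["ro_RO"])
print(tokenizer.batch_decode(generated, skip_special_tokens=True))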
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    r"""Abstract base class for all constraints that can be applied during generation."""

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """
        Tests whether this constraint has been properly defined.
        """
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()

            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )

            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")

        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        """Returns the token(s) that would advance this constraint by one step."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        """Reads in a token and returns whether it creates progress."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        """Reads in a token and returns (stepped, completed, reset)."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        """Resets the state of this constraint to its initialization."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        """Returns the number of remaining steps needed to complete this constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        """Creates a new instance of this constraint; `stateful=True` also copies its progress."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
class PhrasalConstraint(Constraint):
    r"""[`Constraint`] enforcing that an ordered sequence of tokens is included in the output."""

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        if self.completed:
            return False

        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint
class DisjunctiveTrie:
    r"""A helper class that builds a trie from the token sequences in `nested_token_ids`."""

    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    r"""A special [`Constraint`] that is fulfilled by fulfilling just one of several token sequences."""

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        next_tokens = self.trie.next_tokens(self.current_seq)

        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint
class UpperCAmelCase :
def __init__( self : Optional[int] , __lowerCamelCase : List[Constraint] ):
"""simple docstring"""
_snake_case = constraints
# max # of steps required to fulfill a given constraint
_snake_case = max([c.seqlen for c in constraints] )
_snake_case = len(__lowerCamelCase )
_snake_case = False
self.init_state()
def __UpperCAmelCase ( self : Optional[Any] ):
"""simple docstring"""
_snake_case = []
_snake_case = None
_snake_case = [constraint.copy(stateful=__lowerCamelCase ) for constraint in self.constraints]
def __UpperCAmelCase ( self : Union[str, Any] ):
"""simple docstring"""
_snake_case = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
_snake_case = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
_snake_case = constraint.advance()
if isinstance(__lowerCamelCase , __lowerCamelCase ):
token_list.append(__lowerCamelCase )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
token_list.extend(__lowerCamelCase )
else:
_snake_case = self.inprogress_constraint.advance()
if isinstance(__lowerCamelCase , __lowerCamelCase ):
token_list.append(__lowerCamelCase )
elif isinstance(__lowerCamelCase , __lowerCamelCase ):
token_list.extend(__lowerCamelCase )
if len(__lowerCamelCase ) == 0:
return None
else:
return token_list
    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints is fulfilled
                if self.completed:
                    break
    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")
        complete, stepped = False, False
        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state.
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we call self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to the completed list and set
                #    inprogress to None. If there are no pending constraints either, then this full list of constraints
                #    is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step toward any of the
            # constraints in our list?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.
        return complete, stepped
    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we never actually touch the self.constraints objects
        # throughout this process, so they stay in their initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
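

# ---------------------------------------------------------------------------
# Hedged usage sketch (added; not part of the original module). The classes
# above form a constrained-generation state machine in which `update(token_id)`
# returns a (stepped, completed, reset) triple. The toy single-phrase
# constraint below is a simplified, self-contained stand-in showing how a
# generation loop would drive that protocol.
# ---------------------------------------------------------------------------
class _ToyPhrasalConstraint:
    """Tracks progress through one forced token sequence (hypothetical helper)."""

    def __init__(self, token_ids):
        self.token_ids = token_ids
        self.fulfilled_idx = -1
        self.completed = False

    def does_advance(self, token_id):
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id):
        stepped = completed = reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            completed = self.fulfilled_idx == len(self.token_ids) - 1
            self.completed = completed
        else:
            reset = True
            self.fulfilled_idx = -1
        return stepped, completed, reset


if __name__ == "__main__":
    toy = _ToyPhrasalConstraint([5, 9, 2])
    for tok in (5, 9, 2):
        print(toy.update(tok))  # last call prints (True, True, False)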
| 103 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"configuration_pix2struct": [
"PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Pix2StructConfig",
"Pix2StructTextConfig",
"Pix2StructVisionConfig",
],
"processing_pix2struct": ["Pix2StructProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["Pix2StructImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pix2struct"] = [
"PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Pix2StructPreTrainedModel",
"Pix2StructForConditionalGeneration",
"Pix2StructVisionModel",
"Pix2StructTextModel",
]
if TYPE_CHECKING:
    from .configuration_pix2struct import (
        PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Pix2StructConfig,
        Pix2StructTextConfig,
        Pix2StructVisionConfig,
    )
    from .processing_pix2struct import Pix2StructProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_pix2struct import Pix2StructImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_pix2struct import (
            PIX2STRUCT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Pix2StructForConditionalGeneration,
            Pix2StructPreTrainedModel,
            Pix2StructTextModel,
            Pix2StructVisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
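    # -----------------------------------------------------------------------
    # Hedged illustration (added; not part of the original __init__): the
    # _LazyModule above swaps itself into sys.modules, so heavy submodules are
    # only imported when an attribute is first accessed. Conceptually:
    #
    #     import transformers.models.pix2struct as m  # cheap, nothing loaded
    #     m.Pix2StructConfig                          # triggers the real import
    # -----------------------------------------------------------------------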
| 59 | 0 |
'''simple docstring'''
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    """Simplest version of image resizing: each destination pixel copies the
    nearest source pixel, located via floor(ratio * index)."""

    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width <= 0 or dst_height <= 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255
        )

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Get the source X coordinate for destination X."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Get the source Y coordinate for destination Y."""
        return int(self.ratio_y * y)
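

# ---------------------------------------------------------------------------
# Hedged alternative (added; not part of the original script): the per-pixel
# loops in `process` can be written as a single vectorized NumPy gather using
# the same floor(ratio * index) mapping.
# ---------------------------------------------------------------------------
def resize_nearest(img, dst_w: int, dst_h: int):
    src_h, src_w = img.shape[:2]
    ys = (np.arange(dst_h) * (src_h / dst_h)).astype(int)
    xs = (np.arange(dst_w) * (src_w / dst_w)).astype(int)
    return img[ys[:, None], xs[None, :]]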
if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
n.process()
imshow(
f"""Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}""", n.output
)
waitKey(0)
destroyAllWindows()
| 713 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def calculate_waitingtime(arrival_time, burst_time, no_of_processes):
    """Calculate the waiting time of each process (shortest-job-first order)."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Initialize remaining_time from the burst times.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    ready_process: list[int] = []
    completed = 0
    total_time = 0

    # While processes are not completed:
    # a process whose arrival time has passed and that still has remaining
    # execution time is put into ready_process; the shortest process in
    # ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1

        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)

        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time
def calculate_turnaroundtime(burst_time, no_of_processes, waiting_time):
    """Turnaround time of each process is its burst time plus waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time
if __name__ == "__main__":
print("[TEST CASE 01]")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
f"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
f"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(f"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(f"""Average turnaround time = {mean(turn_around_time):.5f}""")
| 92 | 0 |
def selection_sort(collection: list) -> list:
    """Pure implementation of selection sort: repeatedly swap the smallest
    remaining element to the front of the unsorted part.

    >>> selection_sort([0, 5, 3, 2, 2])
    [0, 2, 2, 3, 5]
    """
    length = len(collection)
    for i in range(length - 1):
        least = i
        for k in range(i + 1, length):
            if collection[k] < collection[least]:
                least = k
        if least != i:
            collection[least], collection[i] = (collection[i], collection[least])
    return collection
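

# Hedged self-checks (added; not in the original file): selection sort orders
# any mutually comparable items, including strings.
assert selection_sort([3, 1, 2]) == [1, 2, 3]
assert selection_sort([]) == []
assert selection_sort(["pear", "apple", "mango"]) == ["apple", "mango", "pear"]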
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(selection_sort(unsorted))
| 68 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True,
                 use_attention_mask=True, use_token_type_ids=True, use_labels=True,
                 vocab_size=99, hidden_size=32, num_hidden_layers=5,
                 num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
                 hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512, type_vocab_size=16,
                 type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )
    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
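

# ---------------------------------------------------------------------------
# Hedged usage sketch (added; not part of the test file): the slow test above
# in script form. Requires network access to download `roberta-base`.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from transformers import FlaxRobertaModel

    model = FlaxRobertaModel.from_pretrained("roberta-base", from_pt=True)
    outputs = model(np.ones((1, 1)))
    print(outputs.last_hidden_state.shape)  # (1, 1, 768)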
| 68 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data: Any = data
        self.next: Node | None = None


class CircularLinkedList:
    def __init__(self):
        self.head = None
        self.tail = None

    def __iter__(self) -> Iterator[Any]:
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join(str(item) for item in iter(self))

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if index < 0 or index > len(self):
            raise IndexError("list index out of range.")
        new_node = Node(data)
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self) - 1:  # insert at tail
                self.tail = new_node

    def delete_front(self):
        return self.delete_nth(0)

    def delete_tail(self) -> Any:
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index < len(self):
            raise IndexError("list index out of range.")
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty(self) -> bool:
        return len(self) == 0
def test_circular_linked_list() -> None:
    """Smoke test exercising all CircularLinkedList operations above."""
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1)
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0)
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5):
        assert len(circular_linked_list) == i
        circular_linked_list.insert_nth(i, i + 1)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    circular_linked_list.insert_tail(6)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 7))
    circular_linked_list.insert_head(0)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(0, 7))

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))
    assert circular_linked_list.delete_nth(2) == 3

    circular_linked_list.insert_nth(2, 3)
    assert str(circular_linked_list) == "->".join(str(i) for i in range(1, 6))

    assert circular_linked_list.is_empty() is False
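

def demo_circular_iteration() -> None:
    """Hedged demo (added; not in the original file): iteration over the
    circular list stops after one full lap instead of looping forever."""
    cll = CircularLinkedList()
    for value in ("a", "b", "c"):
        cll.insert_tail(value)
    assert list(cll) == ["a", "b", "c"]
    assert repr(cll) == "a->b->c"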
if __name__ == "__main__":
import doctest
doctest.testmod()
| 465 |
'''simple docstring'''
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    """Generate a random graph where each possible edge appears with the given probability."""
    graph: dict = {i: [] for i in range(vertices_number)}

    # if probability is greater than or equal to 1, generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is less than or equal to 0, return a graph without edges
    if probability <= 0:
        return graph

    # for each couple of nodes, add an edge from u to v
    # if the randomly generated number is less than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, also add an edge from j to i
                    graph[j].append(i)
    return graph
def complete_graph(vertices_number: int) -> dict:
    """Generate a complete (fully connected) undirected graph."""
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
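

# Hedged demo (added; not in the original file): the two degenerate cases are
# deterministic, so they can be asserted directly.
assert random_graph(3, 1) == {0: [1, 2], 1: [0, 2], 2: [0, 1]}
assert random_graph(3, 0) == {0: [], 1: [], 2: []}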
| 465 | 1 |
'''simple docstring'''
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    """Transmitted intensity through a polarizer: I = I0 * cos^2(theta)."""
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of the allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
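

# Hedged worked check (added; not in the original file): at 60 degrees the
# transmitted intensity is cos^2(60 deg) = 1/4 of the incident intensity.
assert abs(malus_law(100, 60) - 25.0) < 1e-9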
if __name__ == "__main__":
import doctest
    doctest.testmod(name='malus_law')
| 407 |
'''simple docstring'''
def temp_input_value(min_val: int = 10, max_val: int = 1000, option: bool = True) -> int:
    """Return min_val when option is True, otherwise max_val."""
    assert (
        isinstance(min_val, int)
        and isinstance(max_val, int)
        and isinstance(option, bool)
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError("Invalid value for min_val or max_val (min_val must be <= max_val)")
    return min_val if option else max_val
def get_avg(number_a: int, number_b: int) -> int:
    """Return the integer average of two numbers."""
    return int((number_a + number_b) / 2)
def guess_the_number(lower: int, higher: int, to_guess: int) -> None:
    """Find to_guess within [lower, higher] by repeatedly halving the interval."""
    assert (
        isinstance(lower, int) and isinstance(higher, int) and isinstance(to_guess, int)
    ), 'argument values must be type of "int"'

    if lower > higher:
        raise ValueError("argument values for lower and higher must satisfy lower < higher")

    if not lower < to_guess < higher:
        raise ValueError("guess value must be within the range of lower and higher value")

    def answer(number: int) -> str:
        if number > to_guess:
            return "high"
        elif number < to_guess:
            return "low"
        else:
            return "same"

    print("started...")

    last_lowest = lower
    last_highest = higher
    last_numbers = []

    while True:
        number = get_avg(last_lowest, last_highest)
        last_numbers.append(number)

        if answer(number) == "low":
            last_lowest = number
        elif answer(number) == "high":
            last_highest = number
        else:
            break

    print(f"guess the number : {last_numbers[-1]}")
    print(f"details : {last_numbers!s}")
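

def demo_binary_guess() -> None:
    """Hedged walk-through (added; hypothetical helper, not in the original
    file): guessing 42 inside [0, 100] halves the interval each round and
    visits 50 -> 25 -> 37 -> 43 -> 40 -> 41 -> 42, i.e. seven guesses."""
    guess_the_number(0, 100, 42)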
def main() -> None:
    """Read the lower, higher and guess values and run the search."""
    lower = int(input("Enter lower value : ").strip())
    higher = int(input("Enter high value : ").strip())
    guess = int(input("Enter value to guess : ").strip())
    guess_the_number(lower, higher, guess)
if __name__ == "__main__":
    main()
| 407 | 1 |
"""simple docstring"""
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name}, {self.value}, {self.weight})"

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight
def build_menu(name, value, weight):
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu
def greedy(item, max_cost, key_func):
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)
def test_greedy():
    """simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
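

# Hedged demo (added; not in the original file): greedy selection by
# value/weight ratio under a weight budget of 10 picks salad + fries.
if __name__ == "__main__":
    foods = build_menu(["burger", "salad", "fries"], [80, 40, 30], [8, 3, 4])
    chosen, total = greedy(foods, 10, Things.value_weight)
    print(chosen, total)  # [Things(salad, 40, 3), Things(fries, 30, 4)] 70.0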
| 705 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
    from transformers import AutoProcessor, Blip2Processor, BlipImageProcessor, GPT2Tokenizer, PreTrainedTokenizerFast
@require_vision
class Blip2ProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")

        processor = Blip2Processor(image_processor, tokenizer)

        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images for testing."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = Blip2Processor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = Blip2Processor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = Blip2Processor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
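

# ---------------------------------------------------------------------------
# Hedged, self-contained demo (added; not part of the test file): building a
# Blip2Processor the same way setUp does and running it on one dummy image.
# Downloads the tiny test tokenizer, so it needs network access.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    image_processor = BlipImageProcessor()
    tokenizer = GPT2Tokenizer.from_pretrained("hf-internal-testing/tiny-random-GPT2Model")
    processor = Blip2Processor(image_processor, tokenizer)

    image = Image.fromarray(np.zeros((30, 40, 3), dtype=np.uint8))
    inputs = processor(text="lower newer", images=image)
    print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']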
| 134 | 0 |
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """Output of the AutoencoderKL encoding method."""

    latent_dist: DiagonalGaussianDistribution


class AutoencoderKL(ModelMixin, ConfigMixin):
    r"""Variational Autoencoder (VAE) model with KL loss."""

    _supports_gradient_checkpointing = True
@register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value
    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling
    def disable_tiling(self):
        self.enable_tiling(False)
    def enable_slicing(self):
        self.use_slicing = True
    def disable_slicing(self):
        self.use_slicing = False
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)
    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)
    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)
    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b
    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)
    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
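

# ---------------------------------------------------------------------------
# Hedged standalone sketch (added; not part of the original model file): the
# blend_v/blend_h helpers above cross-fade adjacent tiles linearly over
# `blend_extent` rows or columns. The same math in plain NumPy, for reference:
# ---------------------------------------------------------------------------
def _blend_v_numpy_reference(a, b, blend_extent):
    """NCHW arrays: fade the top rows of `b` in over the bottom rows of `a`."""
    import numpy as np  # local import; reference sketch only

    out = np.array(b, copy=True)
    blend_extent = min(a.shape[2], b.shape[2], blend_extent)
    for y in range(blend_extent):
        w = y / blend_extent
        out[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - w) + b[:, :, y, :] * w
    return out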
| 54 |
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches=10, n_valid_batches=2):
    """Generate a tuple of dummy DataLoaders for y = a * x + b to test with."""

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Train briefly and return the random numbers drawn at each step."""
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
            rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    """Simple model: y = a * x + b."""

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)

    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)

            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)

    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())

    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 3 states:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))

    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
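
    # -----------------------------------------------------------------------
    # Hedged summary sketch (added; not part of the test file): the pattern
    # the tests above exercise boils down to four lines:
    #
    #     accelerator = Accelerator(
    #         project_dir="ckpts",
    #         project_config=ProjectConfiguration(automatic_checkpoint_naming=True),
    #     )
    #     model, optimizer = accelerator.prepare(model, optimizer)
    #     accelerator.save_state()                                  # checkpoint_0
    #     accelerator.load_state("ckpts/checkpoints/checkpoint_0")  # restore
    # -----------------------------------------------------------------------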
if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()

    # Check CPU state
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == torch.device("cpu").type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"

    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"

    # Check error
    with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
| 322 | 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
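

# ---------------------------------------------------------------------------
# Hedged usage sketch (added; not part of the test file): the scheduler under
# test drives denoising the same way full_loop() below does:
#
#     scheduler = DEISMultistepScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(10)
#     for t in scheduler.timesteps:
#         residual = model(sample, t)
#         sample = scheduler.step(residual, t, sample).prev_sample
# ---------------------------------------------------------------------------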
class DEISMultistepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DEISMultistepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
        }
        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(residual, t, output, **kwargs).prev_sample
                new_output = new_scheduler.step(residual, t, new_output, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def test_from_save_pretrained(self):
        pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            time_step_0 = scheduler.timesteps[5]
            time_step_1 = scheduler.timesteps[6]

            output_0 = scheduler.step(residual, time_step_0, sample, **kwargs).prev_sample
            output_1 = scheduler.step(residual, time_step_1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)
    def __A ( self ):
        scheduler = DEISMultistepScheduler(**self.get_scheduler_config() )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.23916 ) < 1e-3
        scheduler = DPMSolverSinglestepScheduler.from_config(scheduler.config )
        scheduler = DPMSolverMultistepScheduler.from_config(scheduler.config )
        scheduler = UniPCMultistepScheduler.from_config(scheduler.config )
        scheduler = DEISMultistepScheduler.from_config(scheduler.config )
        sample = self.full_loop(scheduler=scheduler )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.23916 ) < 1e-3
    def __A ( self ):
        for timesteps in [25, 50, 100, 999, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
    def __A ( self ):
        self.check_over_configs(thresholding=False )
        for order in [1, 2, 3]:
            for solver_type in ["logrho"]:
                for threshold in [0.5, 1.0, 2.0]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , algorithm_type="""deis""" , solver_order=order , solver_type=solver_type , )
    def __A ( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type )
    def __A ( self ):
        for algorithm_type in ["deis"]:
            for solver_type in ["logrho"]:
                for order in [1, 2, 3]:
                    for prediction_type in ["epsilon", "sample"]:
                        self.check_over_configs(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        sample = self.full_loop(
                            solver_order=order , solver_type=solver_type , prediction_type=prediction_type , algorithm_type=algorithm_type , )
                        assert not torch.isnan(sample ).any(), "Samples have nan numbers"
    def __A ( self ):
        self.check_over_configs(lower_order_final=True )
        self.check_over_configs(lower_order_final=False )
    def __A ( self ):
        for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
            self.check_over_forward(num_inference_steps=num_inference_steps , time_step=0 )
    def __A ( self ):
        sample = self.full_loop()
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.23916 ) < 1e-3
    def __A ( self ):
        sample = self.full_loop(prediction_type="""v_prediction""" )
        result_mean = torch.mean(torch.abs(sample ) )
        assert abs(result_mean.item() - 0.091 ) < 1e-3
    def __A ( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(thresholding=True , dynamic_thresholding_ratio=0 )
        scheduler = scheduler_class(**scheduler_config )
        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter.half()
        scheduler.set_timesteps(num_inference_steps )
        for i, t in enumerate(scheduler.timesteps ):
            residual = model(sample , t )
            sample = scheduler.step(residual , t , sample ).prev_sample
        assert sample.dtype == torch.float16
| 712 |
def add(first: int , second: int ) -> int:
    """Add two non-negative integers using only bitwise operations.

    >>> add(13, 5)
    18
    """
    while second != 0:
        carry = first & second  # bit positions where both operands produce a carry
        first ^= second  # bitwise sum without the carry
        second = carry << 1  # propagate the carry one position to the left
    return first
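# Worked trace of the carry propagation above (an illustrative addition, not
# part of the original snippet): add(6, 3) with 6 = 0b110 and 3 = 0b011:
#   iteration 1: carry = 0b010, first = 0b101, second = 0b100
#   iteration 2: carry = 0b100, first = 0b001, second = 0b1000
#   iteration 3: carry = 0b000, first = 0b1001 (= 9), second = 0 -> loop ends, returns 9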
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input('Enter the first number: ').strip())
    second = int(input('Enter the second number: ').strip())
print(f'''{add(first, second) = }''')
| 64 | 0 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class __magic_name__ ( lowerCAmelCase ):
UpperCAmelCase =""
UpperCAmelCase =(
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
UpperCAmelCase =None # compression type in fsspec. ex: "gzip"
UpperCAmelCase =None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
    def __init__( self , fo = "" , target_protocol = None , target_options = None , **kwargs):
        '''simple docstring'''
        super().__init__(self , **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo , mode='rb' , protocol=target_protocol , compression=self.compression , client_kwargs={
                'requote_redirect_url': False, # see https://github.com/huggingface/datasets/pull/5459
                'trust_env': True, # Enable reading proxy env variables.
                **(target_options or {}).pop('client_kwargs' , {}), # To avoid issues if it was already passed.
            } , **(target_options or {}) , )
        self.compressed_name = os.path.basename(self.file.path.split('::')[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex('.')]
            if '.' in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
@classmethod
def lowerCAmelCase ( cls , snake_case) -> Optional[int]:
'''simple docstring'''
# compressed file paths are always relative to the archive root
return super()._strip_protocol(snake_case).lstrip('/')
def lowerCAmelCase ( self) -> int:
'''simple docstring'''
if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), 'name': self.uncompressed_name}
            self.dir_cache = {f['name']: f}
def lowerCAmelCase ( self , snake_case) -> Union[str, Any]:
'''simple docstring'''
return self.file.open().read()
def lowerCAmelCase ( self , snake_case , snake_case = "rb" , snake_case=None , snake_case=True , snake_case=None , **snake_case , ) -> Any:
'''simple docstring'''
_UpperCAmelCase : Tuple =self._strip_protocol(snake_case)
if mode != "rb":
raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
return self.file.open()
class __magic_name__ ( lowerCAmelCase ):
UpperCAmelCase ="bz2"
UpperCAmelCase ="bz2"
UpperCAmelCase =".bz2"
class __magic_name__ ( lowerCAmelCase ):
UpperCAmelCase ="gzip"
UpperCAmelCase ="gzip"
UpperCAmelCase =".gz"
class __magic_name__ ( lowerCAmelCase ):
UpperCAmelCase ="lz4"
UpperCAmelCase ="lz4"
UpperCAmelCase =".lz4"
class __magic_name__ ( lowerCAmelCase ):
UpperCAmelCase ="xz"
UpperCAmelCase ="xz"
UpperCAmelCase =".xz"
class __magic_name__ ( lowerCAmelCase ):
UpperCAmelCase ="zstd"
UpperCAmelCase ="zstd"
UpperCAmelCase =".zst"
def __init__( self , snake_case , snake_case = "rb" , snake_case = None , snake_case = None , snake_case = DEFAULT_BLOCK_SIZE , **snake_case , ) -> Tuple:
'''simple docstring'''
super().__init__(
fo=snake_case , mode=snake_case , target_protocol=snake_case , target_options=snake_case , block_size=snake_case , **snake_case , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
        _enter = self.file.__enter__
        class WrappedFile:
            def __init__( self , file_):
                '''simple docstring'''
                self._file = file_
def __enter__( self) -> Optional[int]:
'''simple docstring'''
self._file.__enter__()
return self
            def __exit__( self , *args , **kwargs) -> Any:
                '''simple docstring'''
                self._file.__exit__(*args , **kwargs)
def __iter__( self) -> Dict:
'''simple docstring'''
return iter(self._file)
def lowerCAmelCase ( self) -> Tuple:
'''simple docstring'''
return next(self._file)
            def __getattr__( self , attr) -> List[Any]:
                '''simple docstring'''
                return getattr(self._file , attr)
        def fixed_enter(*args , **kwargs):
            return WrappedFile(_enter(*args , **kwargs))
        self.file.__enter__ = fixed_enter
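# Minimal usage sketch for the compression filesystems above (illustrative, not
# from the original file; it assumes these classes are registered with fsspec
# under their `protocol` names and that "/tmp/data.txt.zst" exists locally),
# following the chained-URL pattern documented for gzip at the top of the base
# class:
#
#   import fsspec
#   with fsspec.open("zstd://data.txt::file:///tmp/data.txt.zst", "rb") as f:
#       data = f.read()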
| 446 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
lowercase =logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class __magic_name__ ( lowerCAmelCase ):
    def __init__( self , *args , **kwargs):
        '''simple docstring'''
        super().__init__(*args , **kwargs)
        requires_backends(self , 'vision')
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING)
    def lowerCAmelCase ( self , top_k=None):
        '''simple docstring'''
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return {}, {}, postprocess_params
    def __call__( self , images , **kwargs) -> List[str]:
        '''simple docstring'''
        return super().__call__(images , **kwargs)
    def lowerCAmelCase ( self , image) -> List[str]:
        '''simple docstring'''
        image = load_image(image)
        model_inputs = self.image_processor(images=image , return_tensors=self.framework)
        return model_inputs
    def lowerCAmelCase ( self , model_inputs) -> Optional[Any]:
        '''simple docstring'''
        model_outputs = self.model(**model_inputs)
        return model_outputs
    def lowerCAmelCase ( self , model_outputs , top_k=5) -> Any:
        '''simple docstring'''
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores , ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits , axis=-1)[0]
            topk = tf.math.top_k(probs , k=top_k)
            scores , ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids)]
| 446 | 1 |
'''simple docstring'''
# Lint as: python3
import itertools
import os
import re
UpperCAmelCase_ = re.compile(R'([A-Z]+)([A-Z][a-z])')
UpperCAmelCase_ = re.compile(R'([a-z\d])([A-Z])')
UpperCAmelCase_ = re.compile(R'(?<!_)_(?!_)')
UpperCAmelCase_ = re.compile(R'(_{2,})')
UpperCAmelCase_ = R'^\w+(\.\w+)*$'
UpperCAmelCase_ = R'<>:/\|?*'
def camelcase_to_snakecase( name: str ) -> str:
    name = _uppercase_uppercase_re.sub(r'''\1_\2''' , name )
    name = _lowercase_uppercase_re.sub(r'''\1_\2''' , name )
    return name.lower()
def snakecase_to_camelcase( name: str ) -> str:
    names = _single_underscore_re.split(name )
    names = [_multiple_underscores_re.split(n ) for n in names]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(names ) if n != '''''' )
def filename_prefix_for_name( name: str ) -> str:
    if os.path.basename(name ) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}" )
    return camelcase_to_snakecase(name )
def filename_prefix_for_split( name: str , split: str ) -> str:
    if os.path.basename(name ) != name:
        raise ValueError(f"Should be a dataset name, not a path: {name}" )
    if not re.match(_split_re , split ):
        raise ValueError(f"Split name should match '{_split_re}' but got '{split}'." )
    return f"{filename_prefix_for_name(name )}-{split}"
def filepattern_for_dataset_split( path: str , dataset_name: str , split: str , filetype_suffix: str=None ) -> str:
    prefix = filename_prefix_for_split(dataset_name , split )
    if filetype_suffix:
        prefix += f".{filetype_suffix}"
    filepath = os.path.join(path , prefix )
    return f"{filepath}*"
def filenames_for_dataset_split( path: str , dataset_name: str , split: str , filetype_suffix: str=None , shard_lengths: list=None ) -> list:
    prefix = filename_prefix_for_split(dataset_name , split )
    prefix = os.path.join(path , prefix )
    if shard_lengths:
        num_shards = len(shard_lengths )
        filenames = [f"{prefix}-{shard_id:05d}-of-{num_shards:05d}" for shard_id in range(num_shards )]
        if filetype_suffix:
            filenames = [filename + f".{filetype_suffix}" for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f".{filetype_suffix}"
        return [filename]
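# A few illustrative results for the helpers above (worked out by hand from the
# fixed definitions; the dataset name and paths are made-up examples):
#   camelcase_to_snakecase("SnakeCase")               -> "snake_case"
#   filename_prefix_for_split("my_dataset", "train")  -> "my_dataset-train"
#   filenames_for_dataset_split("/data", "my_dataset", "train", "arrow", [10, 10])
#     -> ["/data/my_dataset-train-00000-of-00002.arrow",
#         "/data/my_dataset-train-00001-of-00002.arrow"]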
| 706 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class lowercase__ :
'''simple docstring'''
def __init__( self, __magic_name__, __magic_name__ = 13, __magic_name__ = 64, __magic_name__ = 2, __magic_name__ = 3, __magic_name__ = 3, __magic_name__ = True, __magic_name__ = True, __magic_name__ = 128, __magic_name__=[16, 32, 64, 128], __magic_name__ = 7, __magic_name__ = 4, __magic_name__ = 37, __magic_name__ = "gelu", __magic_name__ = 0.1, __magic_name__ = 0.1, __magic_name__ = 10, __magic_name__ = 0.02, __magic_name__ = 2, __magic_name__ = 1, __magic_name__ = 128, __magic_name__ = [2, 2, 2, 2], __magic_name__ = 2, __magic_name__ = 2, ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : List[str] = parent
UpperCamelCase__ : Optional[int] = batch_size
UpperCamelCase__ : Optional[Any] = image_size
UpperCamelCase__ : Optional[int] = patch_size
UpperCamelCase__ : Any = num_channels
UpperCamelCase__ : int = is_training
UpperCamelCase__ : str = use_labels
UpperCamelCase__ : Optional[Any] = hidden_size
UpperCamelCase__ : Tuple = num_hidden_layers
UpperCamelCase__ : str = num_attention_heads
UpperCamelCase__ : Any = intermediate_size
UpperCamelCase__ : int = hidden_act
UpperCamelCase__ : Dict = hidden_dropout_prob
UpperCamelCase__ : str = attention_probs_dropout_prob
UpperCamelCase__ : Tuple = type_sequence_label_size
UpperCamelCase__ : Optional[Any] = initializer_range
UpperCamelCase__ : Optional[int] = encoder_stride
UpperCamelCase__ : Any = num_attention_outputs
UpperCamelCase__ : Dict = embed_dim
UpperCamelCase__ : str = embed_dim + 1
UpperCamelCase__ : int = resolution
UpperCamelCase__ : List[str] = depths
UpperCamelCase__ : str = hidden_sizes
UpperCamelCase__ : Tuple = dim
UpperCamelCase__ : Optional[int] = mlp_expansion_ratio
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCamelCase__ : Dict = None
if self.use_labels:
UpperCamelCase__ : Optional[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size )
UpperCamelCase__ : int = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
return EfficientFormerConfig(
image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=__magic_name__, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, resolution=self.resolution, depths=self.depths, hidden_sizes=self.hidden_sizes, dim=self.dim, mlp_expansion_ratio=self.mlp_expansion_ratio, )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> Any:
"""simple docstring"""
UpperCamelCase__ : Dict = TFEfficientFormerModel(config=__magic_name__ )
UpperCamelCase__ : str = model(__magic_name__, training=__magic_name__ )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__ ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = self.type_sequence_label_size
UpperCamelCase__ : Dict = TFEfficientFormerForImageClassification(__magic_name__ )
UpperCamelCase__ : Any = model(__magic_name__, labels=__magic_name__, training=__magic_name__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCamelCase__ : Optional[Any] = 1
UpperCamelCase__ : List[str] = TFEfficientFormerForImageClassification(__magic_name__ )
UpperCamelCase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCamelCase__ : Union[str, Any] = model(__magic_name__, labels=__magic_name__ )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : List[str] = self.prepare_config_and_inputs()
UpperCamelCase__ ,UpperCamelCase__ ,UpperCamelCase__ : Dict = config_and_inputs
UpperCamelCase__ : Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowercase__ ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
a : int = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
a : Union[str, Any] = (
{
"feature-extraction": TFEfficientFormerModel,
"image-classification": (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
a : Any = False
a : Tuple = False
a : Any = False
a : int = False
a : Tuple = False
def UpperCamelCase__ ( self ) -> List[str]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = TFEfficientFormerModelTester(self )
UpperCamelCase__ : int = ConfigTester(
self, config_class=__magic_name__, has_text_modality=__magic_name__, hidden_size=37 )
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''EfficientFormer does not use inputs_embeds''' )
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason='''EfficientFormer does not support input and output embeddings''' )
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
pass
def UpperCamelCase__ ( self ) -> int:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[str] = model_class(__magic_name__ )
UpperCamelCase__ : List[Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase__ : List[str] = [*signature.parameters.keys()]
UpperCamelCase__ : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1], __magic_name__ )
def UpperCamelCase__ ( self ) -> str:
"""simple docstring"""
def check_hidden_states_output(__magic_name__, __magic_name__, __magic_name__ ):
UpperCamelCase__ : Union[str, Any] = model_class(__magic_name__ )
UpperCamelCase__ : str = model(**self._prepare_for_class(__magic_name__, __magic_name__ ), training=__magic_name__ )
UpperCamelCase__ : Tuple = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
UpperCamelCase__ : Optional[int] = getattr(
self.model_tester, '''expected_num_hidden_layers''', self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__magic_name__ ), __magic_name__ )
if hasattr(self.model_tester, '''encoder_seq_length''' ):
UpperCamelCase__ : Dict = self.model_tester.encoder_seq_length
if hasattr(self.model_tester, '''chunk_length''' ) and self.model_tester.chunk_length > 1:
UpperCamelCase__ : Tuple = seq_length * self.model_tester.chunk_length
else:
UpperCamelCase__ : Optional[Any] = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ), [seq_length, self.model_tester.hidden_size], )
if config.is_encoder_decoder:
UpperCamelCase__ : List[str] = outputs.decoder_hidden_states
                self.assertIsInstance(__magic_name__, (list, tuple) )
self.assertEqual(len(__magic_name__ ), __magic_name__ )
UpperCamelCase__ : str = getattr(self.model_tester, '''seq_length''', __magic_name__ )
UpperCamelCase__ : Optional[Any] = getattr(self.model_tester, '''decoder_seq_length''', __magic_name__ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ), [decoder_seq_length, self.model_tester.hidden_size], )
UpperCamelCase__ ,UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase__ : List[str] = True
check_hidden_states_output(__magic_name__, __magic_name__, __magic_name__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase__ : List[Any] = True
check_hidden_states_output(__magic_name__, __magic_name__, __magic_name__ )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__, __magic_name__=False ) -> Any:
"""simple docstring"""
UpperCamelCase__ : List[str] = super()._prepare_for_class(__magic_name__, __magic_name__, return_labels=__magic_name__ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__magic_name__ )
@unittest.skip(reason='''EfficientFormer does not implement masked image modeling yet''' )
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__magic_name__ )
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__magic_name__ )
@slow
def UpperCamelCase__ ( self ) -> Optional[int]:
"""simple docstring"""
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ : Dict = TFEfficientFormerModel.from_pretrained(__magic_name__ )
self.assertIsNotNone(__magic_name__ )
def UpperCamelCase__ ( self ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ ,UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase__ : Dict = True
UpperCamelCase__ : Optional[Any] = getattr(self.model_tester, '''seq_length''', __magic_name__ )
UpperCamelCase__ : Any = getattr(self.model_tester, '''encoder_seq_length''', __magic_name__ )
UpperCamelCase__ : Tuple = getattr(self.model_tester, '''key_length''', __magic_name__ )
UpperCamelCase__ : Union[str, Any] = getattr(self.model_tester, '''chunk_length''', __magic_name__ )
if chunk_length is not None and hasattr(self.model_tester, '''num_hashes''' ):
UpperCamelCase__ : Dict = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
UpperCamelCase__ : Dict = True
UpperCamelCase__ : Optional[int] = False
UpperCamelCase__ : Any = True
UpperCamelCase__ : str = model_class(__magic_name__ )
UpperCamelCase__ : Optional[int] = model(**self._prepare_for_class(__magic_name__, __magic_name__ ), training=__magic_name__ )
UpperCamelCase__ : List[str] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__magic_name__ ), self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCamelCase__ : Optional[int] = True
UpperCamelCase__ : int = model_class(__magic_name__ )
UpperCamelCase__ : str = model(**self._prepare_for_class(__magic_name__, __magic_name__ ), training=__magic_name__ )
UpperCamelCase__ : List[str] = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__magic_name__ ), self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ), [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length], )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], )
def UpperCamelCase__ ( self ) -> Dict:
"""simple docstring"""
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
UpperCamelCase__ ,UpperCamelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
UpperCamelCase__ : str = model_class(__magic_name__ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
UpperCamelCase__ : Tuple = {
key: tf.keras.Input(shape=val.shape[1:], dtype=val.dtype, name=__magic_name__ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
UpperCamelCase__ : str = model(__magic_name__ )
self.assertTrue(outputs_dict is not None )
def lowerCAmelCase_ ( ) -> List[str]:
UpperCamelCase__ : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
return (
EfficientFormerImageProcessor.from_pretrained('''snap-research/efficientformer-l1-300''' )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self ) -> Optional[Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = TFEfficientFormerForImageClassification.from_pretrained('''snap-research/efficientformer-l1-300''' )
UpperCamelCase__ : Dict = self.default_image_processor
UpperCamelCase__ : List[str] = prepare_img()
UpperCamelCase__ : str = image_processor(images=__magic_name__, return_tensors='''tf''' )
# forward pass
UpperCamelCase__ : Dict = model(**__magic_name__, training=__magic_name__ )
# verify the logits
UpperCamelCase__ : Tuple = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape, __magic_name__ )
UpperCamelCase__ : List[str] = tf.constant([-0.0555, 0.4825, -0.0852] )
self.assertTrue(np.allclose(outputs.logits[0, :3], __magic_name__, atol=1E-4 ) )
@slow
def UpperCamelCase__ ( self ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : str = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'''snap-research/efficientformer-l1-300''' )
UpperCamelCase__ : List[str] = self.default_image_processor
UpperCamelCase__ : Union[str, Any] = prepare_img()
UpperCamelCase__ : int = image_processor(images=__magic_name__, return_tensors='''tf''' )
# forward pass
UpperCamelCase__ : Tuple = model(**__magic_name__, training=__magic_name__ )
# verify the logits
UpperCamelCase__ : Tuple = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape, __magic_name__ )
UpperCamelCase__ : Optional[int] = tf.constant([-0.1312, 0.4353, -1.0499] )
self.assertTrue(np.allclose(outputs.logits[0, :3], __magic_name__, atol=1E-4 ) )
| 369 | 0 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
lowerCamelCase : int = logging.getLogger()
def get_setup_file() -> int:
    parser = argparse.ArgumentParser()
    parser.add_argument("""-f""" )
    args = parser.parse_args()
    return args.f
def get_results(output_dir ) -> dict:
    results = {}
    path = os.path.join(output_dir ,"""all_results.json""" )
    if os.path.exists(path ):
        with open(path ,"""r""" ) as f:
            results = json.load(f )
    else:
        raise ValueError(f"""can't find {path}""" )
    return results
def is_cuda_and_apex_available() -> bool:
    is_using_cuda = torch.cuda.is_available() and torch_device == """cuda"""
    return is_using_cuda and is_apex_available()
lowerCamelCase : Any = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class __lowercase (UpperCamelCase_ ):
"""simple docstring"""
@classmethod
def UpperCAmelCase ( cls ) -> Tuple:
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir , """default_config.yml""" )
        write_basic_config(save_location=cls.configPath )
        cls._launch_args = ["""accelerate""", """launch""", """--config_file""", cls.configPath]
@classmethod
def UpperCAmelCase ( cls ) -> Union[str, Any]:
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def UpperCAmelCase ( self ) -> Optional[Any]:
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""\n {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --seed=42\n --checkpointing_steps epoch\n --with_tracking\n """.split()
        if is_cuda_and_apex_available():
            testargs.append("""--fp16""" )
        run_command(self._launch_args + testargs )
        result = get_results(tmp_dir )
        self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """epoch_0""" ) ) )
        self.assertTrue(os.path.exists(os.path.join(tmp_dir , """glue_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def UpperCAmelCase ( self ) -> List[str]:
snake_case : List[str] = self.get_auto_remove_tmp_dir()
snake_case : Dict = f"""\n {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py\n --model_name_or_path distilgpt2\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --block_size 128\n --per_device_train_batch_size 5\n --per_device_eval_batch_size 5\n --num_train_epochs 2\n --output_dir {tmp_dir}\n --checkpointing_steps epoch\n --with_tracking\n """.split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
snake_case : Any = get_results(__SCREAMING_SNAKE_CASE )
self.assertLess(result["""perplexity"""] , 1_0_0 )
self.assertTrue(os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , """clm_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : str = self.get_auto_remove_tmp_dir()
snake_case : str = f"""\n {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py\n --model_name_or_path distilroberta-base\n --train_file ./tests/fixtures/sample_text.txt\n --validation_file ./tests/fixtures/sample_text.txt\n --output_dir {tmp_dir}\n --num_train_epochs=1\n --checkpointing_steps epoch\n --with_tracking\n """.split()
run_command(self._launch_args + testargs )
snake_case : List[Any] = get_results(__SCREAMING_SNAKE_CASE )
self.assertLess(result["""perplexity"""] , 4_2 )
self.assertTrue(os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , """mlm_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def UpperCAmelCase ( self ) -> int:
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
snake_case : Optional[int] = 7 if get_gpu_count() > 1 else 2
snake_case : Any = self.get_auto_remove_tmp_dir()
snake_case : Dict = f"""\n {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/conll/sample.json\n --validation_file tests/fixtures/tests_samples/conll/sample.json\n --output_dir {tmp_dir}\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=2\n --num_train_epochs={epochs}\n --seed 7\n --checkpointing_steps epoch\n --with_tracking\n """.split()
run_command(self._launch_args + testargs )
snake_case : Optional[int] = get_results(__SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
self.assertLess(result["""train_loss"""] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , """ner_no_trainer""" ) ) )
@unittest.skip(reason="""Fix me @muellerzr""" )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def UpperCAmelCase ( self ) -> Dict:
snake_case : Optional[int] = self.get_auto_remove_tmp_dir()
snake_case : Optional[int] = f"""\n {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py\n --model_name_or_path bert-base-uncased\n --version_2_with_negative\n --train_file tests/fixtures/tests_samples/SQUAD/sample.json\n --validation_file tests/fixtures/tests_samples/SQUAD/sample.json\n --output_dir {tmp_dir}\n --seed=42\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n """.split()
run_command(self._launch_args + testargs )
snake_case : Optional[int] = get_results(__SCREAMING_SNAKE_CASE )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result["""eval_f1"""] , 2_8 )
self.assertGreaterEqual(result["""eval_exact"""] , 2_8 )
self.assertTrue(os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , """qa_no_trainer""" ) ) )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def UpperCAmelCase ( self ) -> Any:
snake_case : Any = self.get_auto_remove_tmp_dir()
snake_case : List[str] = f"""\n {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py\n --model_name_or_path bert-base-uncased\n --train_file tests/fixtures/tests_samples/swag/sample.json\n --validation_file tests/fixtures/tests_samples/swag/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=20\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --with_tracking\n """.split()
run_command(self._launch_args + testargs )
snake_case : Optional[int] = get_results(__SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , """swag_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case : Union[str, Any] = self.get_auto_remove_tmp_dir()
snake_case : List[str] = f"""\n {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py\n --model_name_or_path t5-small\n --train_file tests/fixtures/tests_samples/xsum/sample.json\n --validation_file tests/fixtures/tests_samples/xsum/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n --with_tracking\n """.split()
run_command(self._launch_args + testargs )
snake_case : Optional[Any] = get_results(__SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_rouge1"""] , 1_0 )
self.assertGreaterEqual(result["""eval_rouge2"""] , 2 )
self.assertGreaterEqual(result["""eval_rougeL"""] , 7 )
self.assertGreaterEqual(result["""eval_rougeLsum"""] , 7 )
self.assertTrue(os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , """summarization_no_trainer""" ) ) )
@slow
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def UpperCAmelCase ( self ) -> int:
snake_case : Union[str, Any] = self.get_auto_remove_tmp_dir()
snake_case : Tuple = f"""\n {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py\n --model_name_or_path sshleifer/student_marian_en_ro_6_1\n --source_lang en\n --target_lang ro\n --train_file tests/fixtures/tests_samples/wmt16/sample.json\n --validation_file tests/fixtures/tests_samples/wmt16/sample.json\n --output_dir {tmp_dir}\n --max_train_steps=50\n --num_warmup_steps=8\n --num_beams=6\n --learning_rate=3e-3\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --source_lang en_XX\n --target_lang ro_RO\n --checkpointing_steps epoch\n --with_tracking\n """.split()
run_command(self._launch_args + testargs )
snake_case : Optional[Any] = get_results(__SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_bleu"""] , 3_0 )
self.assertTrue(os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , """epoch_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , """translation_no_trainer""" ) ) )
@slow
def UpperCAmelCase ( self ) -> Optional[Any]:
snake_case : Optional[int] = logging.StreamHandler(sys.stdout )
logger.addHandler(__SCREAMING_SNAKE_CASE )
snake_case : int = self.get_auto_remove_tmp_dir()
snake_case : str = f"""\n {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py\n --dataset_name huggingface/semantic-segmentation-test-sample\n --output_dir {tmp_dir}\n --max_train_steps=10\n --num_warmup_steps=2\n --learning_rate=2e-4\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --checkpointing_steps epoch\n """.split()
run_command(self._launch_args + testargs )
snake_case : str = get_results(__SCREAMING_SNAKE_CASE )
self.assertGreaterEqual(result["""eval_overall_accuracy"""] , 0.10 )
@mock.patch.dict(os.environ , {"""WANDB_MODE""": """offline"""} )
def UpperCAmelCase ( self ) -> List[Any]:
snake_case : Optional[Any] = self.get_auto_remove_tmp_dir()
snake_case : Optional[int] = f"""\n {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py\n --model_name_or_path google/vit-base-patch16-224-in21k\n --dataset_name hf-internal-testing/cats_vs_dogs_sample\n --learning_rate 1e-4\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 1\n --max_train_steps 2\n --train_val_split 0.1\n --seed 42\n --output_dir {tmp_dir}\n --with_tracking\n --checkpointing_steps 1\n """.split()
if is_cuda_and_apex_available():
testargs.append("""--fp16""" )
run_command(self._launch_args + testargs )
snake_case : List[Any] = get_results(__SCREAMING_SNAKE_CASE )
# The base model scores a 25%
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , """step_1""" ) ) )
self.assertTrue(os.path.exists(os.path.join(__SCREAMING_SNAKE_CASE , """image_classification_no_trainer""" ) ) )
| 587 |
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyV22Pipeline, KandinskyV22PriorPipeline, UNet2DConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase_ ( PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    UpperCAmelCase_ : List[str] = KandinskyV22Pipeline
UpperCAmelCase_ : List[str] = [
"""image_embeds""",
"""negative_image_embeds""",
]
UpperCAmelCase_ : Optional[Any] = ["""image_embeds""", """negative_image_embeds"""]
UpperCAmelCase_ : Union[str, Any] = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
UpperCAmelCase_ : Tuple = False
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
return 32
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
return 32
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Any:
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
return 100
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Dict:
torch.manual_seed(0 )
        model_kwargs = {
'''in_channels''': 4,
# Out channels is double in channels because predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
        model = UNet2DConditionModel(**model_kwargs )
return model
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
torch.manual_seed(0 )
        model = VQModel(**self.dummy_movq_kwargs )
return model
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=1000 , beta_schedule='''linear''' , beta_start=0.00085 , beta_end=0.012 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type='''epsilon''' , thresholding=False , )
        components = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
    def SCREAMING_SNAKE_CASE_ ( self , device , seed=0 ) ->Union[str, Any]:
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''guidance_scale''': 4.0,
            '''num_inference_steps''': 2,
            '''output_type''': '''np''',
        }
        return inputs
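    # Design note (an illustrative aside, not from the original file): the branch
    # above exists because the MPS backend has not supported device-bound
    # generators, so a globally seeded generator is used there instead:
    #   generator = torch.manual_seed(0)                           # global RNG
    #   generator = torch.Generator(device='cuda').manual_seed(0)  # per-device RNG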
    def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6237976, 1.0, 0.36441332, 1.0, 0.70639634, 0.29877186, 0.85652125, 0.5216843, 0.54454046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
    def tearDown( self ) ->Any:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy''' )
        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyV22Pipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-decoder''' , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        prompt = '''red cat, 4k photo'''
        generator = torch.Generator(device='''cuda''' ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
        generator = torch.Generator(device='''cuda''' ).manual_seed(0 )
        output = pipeline(
            image_embeds=image_emb , negative_image_embeds=zero_image_emb , generator=generator , num_inference_steps=100 , output_type='''np''' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image , expected_image )
| 312 | 0 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class _A ( TaskTemplate ):
    '''simple docstring'''
    task: str = field(default='summarization' , metadata={'include_in_asdict_even_if_is_default': True} )
    input_schema: ClassVar[Features] = Features({'text': Value('string' )} )
    label_schema: ClassVar[Features] = Features({'summary': Value('string' )} )
    text_column: str = 'text'
    summary_column: str = 'summary'
    @property
    def column_mapping( self ) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
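# Illustrative usage sketch of the task template above (`_A` is this file's
# obfuscated name for the summarization template; the column names are made-up
# examples):
#   template = _A(text_column='article' , summary_column='highlights' )
#   template.column_mapping  # -> {'article': 'text', 'highlights': 'summary'}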
| 717 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _A :
'''simple docstring'''
def __init__( self : Optional[int] , lowerCamelCase : str , lowerCamelCase : int=13 , lowerCamelCase : Union[str, Any]=7 , lowerCamelCase : Optional[int]=True , lowerCamelCase : Any=True , lowerCamelCase : Optional[Any]=True , lowerCamelCase : Any=True , lowerCamelCase : Tuple=99 , lowerCamelCase : Optional[Any]=32 , lowerCamelCase : Tuple=2 , lowerCamelCase : Dict=4 , lowerCamelCase : Tuple=37 , lowerCamelCase : Dict="gelu" , lowerCamelCase : str=0.1 , lowerCamelCase : str=0.1 , lowerCamelCase : List[Any]=512 , lowerCamelCase : Union[str, Any]=16 , lowerCamelCase : Tuple=2 , lowerCamelCase : Union[str, Any]=0.02 , lowerCamelCase : Optional[Any]=3 , lowerCamelCase : Tuple=4 , lowerCamelCase : Union[str, Any]=None , )-> List[Any]:
snake_case__ : str = parent
snake_case__ : Optional[int] = 13
snake_case__ : List[str] = 7
snake_case__ : Tuple = True
snake_case__ : List[str] = True
snake_case__ : List[str] = True
snake_case__ : Tuple = True
snake_case__ : List[str] = 99
snake_case__ : str = 384
snake_case__ : int = 2
snake_case__ : int = 4
snake_case__ : str = 37
snake_case__ : Optional[Any] = """gelu"""
snake_case__ : Dict = 0.1
snake_case__ : str = 0.1
snake_case__ : str = 512
snake_case__ : List[Any] = 16
snake_case__ : List[Any] = 2
snake_case__ : str = 0.02
snake_case__ : int = 3
snake_case__ : int = 4
snake_case__ : Optional[int] = 128
snake_case__ : Tuple = 2
snake_case__ : str = 9
snake_case__ : Optional[int] = 1
snake_case__ : str = None
def __lowerCAmelCase ( self : List[str] )-> Union[str, Any]:
snake_case__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : int = None
if self.use_input_mask:
snake_case__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : Union[str, Any] = None
if self.use_token_type_ids:
snake_case__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case__ : Union[str, Any] = None
snake_case__ : Optional[Any] = None
snake_case__ : Optional[int] = None
if self.use_labels:
snake_case__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ : Any = ids_tensor([self.batch_size] , self.num_choices )
snake_case__ : Union[str, Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=lowerCamelCase , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Tuple , lowerCamelCase : str , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[Any] )-> Optional[int]:
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config, "use_cache"):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )
                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
@slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
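# A minimal standalone sketch of the same shape check, assuming only the public
# transformers/TF APIs used above (downloading the checkpoint needs network access):
#
#   from transformers import TFConvBertModel
#   import tensorflow as tf
#
#   model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
#   output = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]
#   assert output.shape == (1, 6, 768)  # (batch, sequence, hidden_size)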
| 172 | 0 |
import logging
import os
from logging import (
CRITICAL, # NOQA
DEBUG, # NOQA
ERROR, # NOQA
FATAL, # NOQA
INFO, # NOQA
NOTSET, # NOQA
WARN, # NOQA
WARNING, # NOQA
)
from typing import Optional
from tqdm import auto as tqdm_lib
log_levels = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
    "critical": logging.CRITICAL,
}

_default_log_level = logging.WARNING


def _get_default_logging_level():
    """Return the default level, overridable with the DATASETS_VERBOSITY env variable."""
    env_level_str = os.getenv("DATASETS_VERBOSITY", None)
    if env_level_str:
        if env_level_str in log_levels:
            return log_levels[env_level_str]
        else:
            logging.getLogger().warning(
                f"Unknown option DATASETS_VERBOSITY={env_level_str}, "
                f"has to be one of: {', '.join(log_levels.keys())}"
            )
    return _default_log_level


def _get_library_name() -> str:
    return __name__.split(".")[0]


def _get_library_root_logger() -> logging.Logger:
    return logging.getLogger(_get_library_name())


def _configure_library_root_logger() -> None:
    # Apply our default configuration to the library root logger.
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(_get_default_logging_level())


def _reset_library_root_logger() -> None:
    library_root_logger = _get_library_root_logger()
    library_root_logger.setLevel(logging.NOTSET)


def get_logger(name: Optional[str] = None) -> logging.Logger:
    """Return a logger with the specified name, defaulting to the library root logger."""
    if name is None:
        name = _get_library_name()
    return logging.getLogger(name)


def get_verbosity() -> int:
    return _get_library_root_logger().getEffectiveLevel()


def set_verbosity(verbosity: int) -> None:
    _get_library_root_logger().setLevel(verbosity)


def set_verbosity_info():
    return set_verbosity(INFO)


def set_verbosity_warning():
    return set_verbosity(WARNING)


def set_verbosity_debug():
    return set_verbosity(DEBUG)


def set_verbosity_error():
    return set_verbosity(ERROR)


def disable_propagation() -> None:
    _get_library_root_logger().propagate = False


def enable_propagation() -> None:
    _get_library_root_logger().propagate = True


# Configure the library root logger at the module level (singleton-like)
_configure_library_root_logger()


class EmptyTqdm:
    """Dummy tqdm which doesn't do anything."""

    def __init__(self, *args, **kwargs):  # pylint: disable=unused-argument
        self._iterator = args[0] if args else None

    def __iter__(self):
        return iter(self._iterator)

    def __getattr__(self, _):
        """Return empty function."""

        def empty_fn(*args, **kwargs):  # pylint: disable=unused-argument
            return

        return empty_fn

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        return


_tqdm_active = True


class _tqdm_cls:
    def __call__(self, *args, disable=False, **kwargs):
        if _tqdm_active and not disable:
            return tqdm_lib.tqdm(*args, **kwargs)
        else:
            return EmptyTqdm(*args, **kwargs)

    def set_lock(self, *args, **kwargs):
        self._lock = None
        if _tqdm_active:
            return tqdm_lib.tqdm.set_lock(*args, **kwargs)

    def get_lock(self):
        if _tqdm_active:
            return tqdm_lib.tqdm.get_lock()


tqdm = _tqdm_cls()


def is_progress_bar_enabled() -> bool:
    global _tqdm_active
    return bool(_tqdm_active)


def enable_progress_bar():
    global _tqdm_active
    _tqdm_active = True


def disable_progress_bar():
    global _tqdm_active
    _tqdm_active = False
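# Usage sketch for the helpers above (hedged: this assumes the module is importable
# as `datasets.utils.logging`, which is what its structure suggests):
#
#   from datasets.utils.logging import get_logger, set_verbosity_info, disable_progress_bar
#
#   set_verbosity_info()           # the root "datasets" logger now emits INFO
#   logger = get_logger(__name__)  # child loggers inherit that level
#   disable_progress_bar()         # tqdm(...) now returns no-op EmptyTqdm objects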
| 413 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}
class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
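# Construction sketch (hedged: standard PretrainedConfig keyword-argument usage):
#
#   config = ResNetConfig(layer_type="basic", depths=[2, 2, 2, 2], out_features=["stage4"])
#   config.stage_names  # -> ['stem', 'stage1', 'stage2', 'stage3', 'stage4']
#
# An unknown layer_type (e.g. "dense") raises the ValueError defined in __init__.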
| 507 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None


def make_tree() -> Node | None:
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0


def level_order(root: Node | None) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """ZigZag traverse: collects each level alternately left-to-right and right-to-left."""
    if root is None:
        return []

    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)

    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output


def main() -> None:  # Main function for testing.
    tree = make_tree()
    print(f"In-order Traversal: {inorder(tree)}")
    print(f"Pre-order Traversal: {preorder(tree)}")
    print(f"Post-order Traversal: {postorder(tree)}", "\n")
    print(f"Height of Tree: {height(tree)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(tree), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(tree) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(tree, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(tree))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
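# For the fixed 5-node tree from make_tree() (1 at the root, 2/3 as its children,
# 4/5 under node 2), the functions above produce:
#   preorder(tree)    -> [1, 2, 4, 5, 3]
#   inorder(tree)     -> [4, 2, 5, 1, 3]
#   postorder(tree)   -> [4, 5, 2, 3, 1]
#   height(tree)      -> 3
#   level_order(tree) -> [1, 2, 3, 4, 5]
#   zigzag(tree)      -> [[1], [3, 2], [4, 5]]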
| 24 |
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)
def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
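# Hedged usage sketch for the two public entry points above (matching the documented
# `datasets.interleave_datasets` / `datasets.concatenate_datasets` API; the toy data
# below is made up):
#
#   from datasets import Dataset
#   d1 = Dataset.from_dict({"x": [0, 1, 2]})
#   d2 = Dataset.from_dict({"x": [10, 11, 12]})
#   interleave_datasets([d1, d2], stopping_strategy="all_exhausted")  # alternating rows
#   concatenate_datasets([d1, d2])                                    # 6 rows, d1 then d2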
| 24 | 1 |
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"configuration_gpt_neox": ["GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoXConfig"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ["GPTNeoXTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
'''GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoXForCausalLM''',
'''GPTNeoXForQuestionAnswering''',
'''GPTNeoXForSequenceClassification''',
'''GPTNeoXForTokenClassification''',
'''GPTNeoXLayer''',
'''GPTNeoXModel''',
'''GPTNeoXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
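# Note on the lazy-import pattern above: _LazyModule defers the heavy torch /
# tokenizers imports until an attribute is first touched. A hedged access sketch:
#
#   import transformers.models.gpt_neox as gpt_neox  # cheap, nothing heavy loaded yet
#   gpt_neox.GPTNeoXForCausalLM                      # now the torch branch is imported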
| 254 |
"""simple docstring"""
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_split_names(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
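# Hedged sketch of the inspected APIs outside pytest (requires network access, and
# hub-hosted datasets may change over time):
#
#   from datasets import get_dataset_split_names
#   get_dataset_split_names("squad", config_name="plain_text")  # -> ['train', 'validation']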
| 123 | 0 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12"  # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    """Creates a random int32 tensor of the given shape within the vocab size."""
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)
    return output


def random_attention_mask(shape, rng=None):
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class FlaxGenerationTesterMixin:
    model_tester = None
    all_generative_model_classes = ()

    def _get_input_ids_and_config(self):
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]

        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def lowercase ( self : Union[str, Any] ):
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Optional[Any] = self._get_input_ids_and_config()
snake_case__ : Optional[int] = False
snake_case__ : Optional[int] = max_length
snake_case__ : Tuple = 0
for model_class in self.all_generative_model_classes:
snake_case__ : Any = model_class(__a )
snake_case__ : str = model_class.__name__[4:] # Skip the "Flax" at the beginning
snake_case__ : Optional[int] = getattr(__a , __a )
snake_case__ : str = pt_model_class(__a ).eval()
snake_case__ : Tuple = load_flax_weights_in_pytorch_model(__a , flax_model.params )
snake_case__ : List[Any] = flax_model.generate(__a ).sequences
snake_case__ : List[Any] = pt_model.generate(torch.tensor(__a , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
snake_case__ : Union[str, Any] = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def lowercase ( self : List[str] ):
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Optional[Any] = self._get_input_ids_and_config()
snake_case__ : List[Any] = False
snake_case__ : List[Any] = max_length
for model_class in self.all_generative_model_classes:
snake_case__ : List[str] = model_class(__a )
snake_case__ : int = model.generate(__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
snake_case__ : Any = jit(model.generate )
snake_case__ : List[str] = jit_generate(__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def lowercase ( self : Optional[Any] ):
snake_case__ , snake_case__ , snake_case__ , snake_case__ : str = self._get_input_ids_and_config()
snake_case__ : Optional[Any] = True
snake_case__ : Union[str, Any] = max_length
for model_class in self.all_generative_model_classes:
snake_case__ : Tuple = model_class(__a )
snake_case__ : Optional[int] = model.generate(__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
snake_case__ : Union[str, Any] = jit(model.generate )
snake_case__ : List[Any] = jit_generate(__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def lowercase ( self : Any ):
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Union[str, Any] = self._get_input_ids_and_config()
snake_case__ : Union[str, Any] = False
snake_case__ : Any = max_length
snake_case__ : Optional[int] = 2
for model_class in self.all_generative_model_classes:
snake_case__ : Optional[Any] = model_class(__a )
snake_case__ : Any = model.generate(__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
snake_case__ : Union[str, Any] = jit(model.generate )
snake_case__ : Dict = jit_generate(__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def lowercase ( self : Optional[Any] ):
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Optional[int] = self._get_input_ids_and_config()
snake_case__ : Optional[Any] = False
snake_case__ : List[str] = max_length
snake_case__ : Optional[int] = 2
snake_case__ : Tuple = 2
for model_class in self.all_generative_model_classes:
snake_case__ : Any = model_class(__a )
snake_case__ : int = model.generate(__a ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def lowercase ( self : List[str] ):
snake_case__ , snake_case__ , snake_case__ , snake_case__ : int = self._get_input_ids_and_config()
snake_case__ : Dict = True
snake_case__ : Optional[int] = max_length
snake_case__ : Dict = 0.8
snake_case__ : int = 1_0
snake_case__ : List[str] = 0.3
snake_case__ : Optional[Any] = 1
snake_case__ : List[Any] = 8
snake_case__ : Any = 9
for model_class in self.all_generative_model_classes:
snake_case__ : int = model_class(__a )
snake_case__ : List[str] = model.generate(__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
snake_case__ : str = jit(model.generate )
snake_case__ : Tuple = jit_generate(__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def lowercase ( self : List[str] ):
snake_case__ , snake_case__ , snake_case__ , snake_case__ : str = self._get_input_ids_and_config()
snake_case__ : int = max_length
snake_case__ : Any = 1
snake_case__ : int = 8
snake_case__ : Union[str, Any] = 9
for model_class in self.all_generative_model_classes:
snake_case__ : List[Any] = model_class(__a )
snake_case__ : str = model.generate(__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
snake_case__ : List[str] = jit(model.generate )
snake_case__ : List[Any] = jit_generate(__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def lowercase ( self : List[Any] ):
snake_case__ , snake_case__ , snake_case__ , snake_case__ : str = self._get_input_ids_and_config()
snake_case__ : List[str] = max_length
snake_case__ : Optional[Any] = 2
snake_case__ : int = 1
snake_case__ : Optional[Any] = 8
snake_case__ : int = 9
for model_class in self.all_generative_model_classes:
snake_case__ : List[str] = model_class(__a )
snake_case__ : List[Any] = model.generate(__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
snake_case__ : Optional[int] = jit(model.generate )
snake_case__ : Optional[Any] = jit_generate(__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def lowercase ( self : Any ):
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Optional[int] = self._get_input_ids_and_config()
# pad attention mask on the left
snake_case__ : str = attention_mask.at[(0, 0)].set(0 )
snake_case__ : Dict = False
snake_case__ : Optional[int] = max_length
for model_class in self.all_generative_model_classes:
snake_case__ : int = model_class(__a )
snake_case__ : List[Any] = model.generate(__a , attention_mask=__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
snake_case__ : int = jit(model.generate )
snake_case__ : List[str] = jit_generate(__a , attention_mask=__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def lowercase ( self : int ):
snake_case__ , snake_case__ , snake_case__ , snake_case__ : Dict = self._get_input_ids_and_config()
# pad attention mask on the left
snake_case__ : Dict = attention_mask.at[(0, 0)].set(0 )
snake_case__ : Union[str, Any] = True
snake_case__ : List[Any] = max_length
for model_class in self.all_generative_model_classes:
snake_case__ : List[str] = model_class(__a )
snake_case__ : List[str] = model.generate(__a , attention_mask=__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
snake_case__ : List[Any] = jit(model.generate )
snake_case__ : int = jit_generate(__a , attention_mask=__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def lowercase ( self : List[str] ):
snake_case__ , snake_case__ , snake_case__ , snake_case__ : int = self._get_input_ids_and_config()
# pad attention mask on the left
snake_case__ : Optional[int] = attention_mask.at[(0, 0)].set(0 )
snake_case__ : Dict = 2
snake_case__ : List[Any] = max_length
for model_class in self.all_generative_model_classes:
snake_case__ : Union[str, Any] = model_class(__a )
snake_case__ : List[str] = model.generate(__a , attention_mask=__a ).sequences
self.assertEqual(generation_outputs.shape[-1] , __a )
snake_case__ : List[str] = jit(model.generate )
snake_case__ : int = jit_generate(__a , attention_mask=__a ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class FlaxGenerationIntegrationTests(unittest.TestCase):
    def test_validate_generation_inputs(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert")
        model = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only")

        encoder_input_str = "Hello world"
        input_ids = tokenizer(encoder_input_str, return_tensors="np").input_ids

        # typos are quickly detected (the correct argument is `do_sample`)
        with self.assertRaisesRegex(ValueError, "do_samples"):
            model.generate(input_ids, do_samples=True)

        # arbitrary arguments that will not be used anywhere are also not accepted
        with self.assertRaisesRegex(ValueError, "foo"):
            fake_model_kwargs = {"foo": "bar"}
            model.generate(input_ids, **fake_model_kwargs)
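# The recurring pattern in the mixin above is an eager-vs-jit consistency check.
# A hedged standalone sketch of that core assertion (model/input_ids are placeholders):
#
#   from jax import jit
#   jit_generate = jit(model.generate)
#   assert model.generate(input_ids).sequences.tolist() == jit_generate(input_ids).sequences.tolist()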
| 127 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = "<<<<<<< This should probably be modified because it mentions: "
HIGHLIGHT_MESSAGE_POST = "=======\n>>>>>>>\n"

TO_HIGHLIGHT = [
'TextEncoderConfig',
'ByteTextEncoder',
'SubwordTextEncoder',
'encoder_config',
'maybe_build_from_corpus',
'manual_dir',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r'tfds\.core', r'datasets'),
(r'tf\.io\.gfile\.GFile', r'open'),
(r'tf\.([\w\d]+)', r'datasets.Value(\'\1\')'),
(r'tfds\.features\.Text\(\)', r'datasets.Value(\'string\')'),
(r'tfds\.features\.Text\(', r'datasets.Value(\'string\'),'),
(r'features\s*=\s*tfds.features.FeaturesDict\(', r'features=datasets.Features('),
(r'tfds\.features\.FeaturesDict\(', r'dict('),
(r'The TensorFlow Datasets Authors', r'The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'),
(r'tfds\.', r'datasets.'),
(r'dl_manager\.manual_dir', r'self.config.data_dir'),
(r'self\.builder_config', r'self.config'),
]
def convert_command_factory(args: Namespace):
    """Factory returning the ConvertCommand for the `datasets-cli convert` subcommand."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)


class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        train_parser = parser.add_parser(
            "convert",
            help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.",
        )
        train_parser.add_argument(
            "--tfds_path",
            type=str,
            required=True,
            help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.",
        )
        train_parser.add_argument(
            "--datasets_directory", type=str, required=True, help="Path to the HuggingFace Datasets folder."
        )
        train_parser.set_defaults(func=convert_command_factory)

    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger("datasets-cli/converting")
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
if os.path.isdir(self._tfds_path ):
snake_case__ : Union[str, Any] = os.path.abspath(self._tfds_path )
elif os.path.isfile(self._tfds_path ):
snake_case__ : Any = os.path.dirname(self._tfds_path )
else:
raise ValueError("""--tfds_path is neither a directory nor a file. Please check path.""" )
snake_case__ : str = os.path.abspath(self._datasets_directory )
self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}' )
snake_case__ : Union[str, Any] = []
snake_case__ : Union[str, Any] = []
snake_case__ : Dict = {}
if os.path.isdir(self._tfds_path ):
snake_case__ : List[str] = os.listdir(__a )
else:
snake_case__ : Any = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f'Looking at file {f_name}' )
snake_case__ : List[Any] = os.path.join(__a , __a )
snake_case__ : str = os.path.join(__a , __a )
if not os.path.isfile(__a ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("""Skipping file""" )
continue
with open(__a , encoding="""utf-8""" ) as f:
snake_case__ : Dict = f.readlines()
snake_case__ : int = []
snake_case__ : List[Any] = False
snake_case__ : Union[str, Any] = False
snake_case__ : Optional[int] = []
for line in lines:
snake_case__ : Optional[int] = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
snake_case__ : List[str] = """import datasets\n"""
elif "import tensorflow" in out_line:
# order is important here
snake_case__ : Dict = """"""
continue
elif "from absl import logging" in out_line:
snake_case__ : str = """from datasets import logging\n"""
elif "getLogger" in out_line:
snake_case__ : str = out_line.replace("""getLogger""" , """get_logger""" )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
snake_case__ : List[str] = True
snake_case__ : List[str] = list(filter(lambda __a : e in out_line , __a ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(__a ) + """\n""" )
out_lines.append(__a )
out_lines.append(__a )
continue
else:
for pattern, replacement in TO_CONVERT:
snake_case__ : Tuple = re.sub(__a , __a , __a )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
snake_case__ : int = re.match(r"""from\stensorflow_datasets.*import\s([^\.\r\n]+)""" , __a )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(""",""" ) )
snake_case__ : Tuple = """from . import """ + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'Error converting {out_line.strip()}' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
snake_case__ : List[str] = True
out_lines.append(__a )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
snake_case__ : str = f_name.replace(""".py""" , """""" )
snake_case__ : List[Any] = os.path.join(__a , __a )
snake_case__ : Dict = os.path.join(__a , __a )
os.makedirs(__a , exist_ok=__a )
self._logger.info(f'Adding directory {output_dir}' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(__a )
if needs_manual_update:
with_manual_update.append(__a )
with open(__a , """w""" , encoding="""utf-8""" ) as f:
f.writelines(__a )
self._logger.info(f'Converted in {output_file}' )
for utils_file in utils_files:
try:
snake_case__ : Any = os.path.basename(__a )
snake_case__ : List[Any] = imports_to_builder_map[f_name.replace(""".py""" , """""" )]
self._logger.info(f'Moving {dest_folder} to {utils_file}' )
shutil.copy(__a , __a )
except KeyError:
self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
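# Hedged CLI sketch for the subcommand registered above (paths are placeholders):
#
#   datasets-cli convert --tfds_path ./my_tfds_dataset.py --datasets_directory ./my_datasets/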
| 127 | 1 |
"""simple docstring"""
def rank_of_matrix(matrix: list[list[int | float]]) -> int:
    """Finds the rank of a matrix via in-place Gaussian elimination."""
    rows = len(matrix)
    columns = len(matrix[0])
    rank = min(rows, columns)

    for row in range(rank):
        # Check if diagonal element is not zero
        if matrix[row][row] != 0:
            # Eliminate all the elements below the diagonal
            for col in range(row + 1, rows):
                multiplier = matrix[col][row] / matrix[row][row]
                for i in range(row, columns):
                    matrix[col][i] -= multiplier * matrix[row][i]
        else:
            # Find a non-zero diagonal element to swap rows
            reduce = True
            for i in range(row + 1, rows):
                if matrix[i][row] != 0:
                    matrix[row], matrix[i] = matrix[i], matrix[row]
                    reduce = False
                    break
            if reduce:
                rank -= 1
                for i in range(rows):
                    matrix[i][row] = matrix[i][rank]

            # Reduce the row pointer by one to stay on the same row
            row -= 1
    return rank
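# Worked example: in [[1, 2, 3], [2, 4, 6], [3, 6, 9]] the last two rows are
# multiples of the first, so elimination zeroes them out and each zero pivot
# shrinks the rank by one:
#
#   >>> rank_of_matrix([[1, 2, 3], [2, 4, 6], [3, 6, 9]])
#   1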
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 91 |
"""simple docstring"""
import os
import torch
from ..logging import get_logger
from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME
from .versions import is_torch_version
if is_torch_version('''>=''', FSDP_PYTORCH_VERSION):
import torch.distributed.checkpoint as dist_cp
from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner
from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict
from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
_lowercase = get_logger(__name__)
def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : int , snake_case__ : int , snake_case__ : str=0 ):
os.makedirs(snake_case__ , exist_ok=snake_case__ )
with FSDP.state_dict_type(
snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
A = model.state_dict()
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
A = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin'
A = os.path.join(snake_case__ , snake_case__ )
if accelerator.process_index == 0:
logger.info(F'Saving model to {output_model_file}' )
torch.save(snake_case__ , snake_case__ )
logger.info(F'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
A = (
F'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
A = os.path.join(snake_case__ , snake_case__ )
logger.info(F'Saving model to {output_model_file}' )
torch.save(snake_case__ , snake_case__ )
logger.info(F'Model saved to {output_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
A = os.path.join(snake_case__ , F'{MODEL_NAME}_{model_index}' )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
logger.info(F'Saving model to {ckpt_dir}' )
A = {'model': state_dict}
dist_cp.save_state_dict(
state_dict=snake_case__ , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , )
logger.info(F'Model saved to {ckpt_dir}' )
def _snake_case ( snake_case__ : int , snake_case__ : List[str] , snake_case__ : str , snake_case__ : str , snake_case__ : Any=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if type(snake_case__ ) != FSDP and accelerator.process_index != 0:
if not fsdp_plugin.sync_module_states:
raise ValueError(
'Set the `sync_module_states` flag to `True` so that model states are synced across processes when '
'initializing FSDP object' )
return
A = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin'
A = os.path.join(snake_case__ , snake_case__ )
logger.info(F'Loading model from {input_model_file}' )
A = torch.load(snake_case__ )
logger.info(F'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT:
A = (
F'{MODEL_NAME}_rank{accelerator.process_index}.bin'
if model_index == 0
else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin'
)
A = os.path.join(snake_case__ , snake_case__ )
logger.info(F'Loading model from {input_model_file}' )
A = torch.load(snake_case__ )
logger.info(F'Model loaded from {input_model_file}' )
elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT:
A = (
os.path.join(snake_case__ , F'{MODEL_NAME}_{model_index}' )
if F'{MODEL_NAME}' not in input_dir
else input_dir
)
logger.info(F'Loading model from {ckpt_dir}' )
A = {'model': model.state_dict()}
dist_cp.load_state_dict(
state_dict=snake_case__ , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , planner=DefaultLoadPlanner() , )
A = state_dict['model']
logger.info(F'Model loaded from {ckpt_dir}' )
model.load_state_dict(snake_case__ )
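# Note: the three branches above (and in the matching save path) mirror FSDP's
# StateDictType options — FULL_STATE_DICT writes one rank-0 file,
# LOCAL_STATE_DICT writes one shard per rank, and SHARDED_STATE_DICT uses the
# torch.distributed.checkpoint directory layout read back through dist_cp.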
def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : Any=0 ):
os.makedirs(snake_case__ , exist_ok=snake_case__ )
with FSDP.state_dict_type(
snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
A = FSDP.optim_state_dict(snake_case__ , snake_case__ )
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
if accelerator.process_index == 0:
A = (
F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
A = os.path.join(snake_case__ , snake_case__ )
logger.info(F'Saving Optimizer state to {output_optimizer_file}' )
torch.save(snake_case__ , snake_case__ )
logger.info(F'Optimizer state saved in {output_optimizer_file}' )
else:
A = os.path.join(snake_case__ , F'{OPTIMIZER_NAME}_{optimizer_index}' )
os.makedirs(snake_case__ , exist_ok=snake_case__ )
logger.info(F'Saving Optimizer state to {ckpt_dir}' )
dist_cp.save_state_dict(
state_dict={'optimizer': optim_state} , storage_writer=dist_cp.FileSystemWriter(snake_case__ ) , planner=DefaultSavePlanner() , )
logger.info(F'Optimizer state saved in {ckpt_dir}' )
def _snake_case ( snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Optional[int]=0 ):
accelerator.wait_for_everyone()
with FSDP.state_dict_type(
snake_case__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ):
if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT:
A = None
# below check should work but currently it isn't working (mostly opytorch issue),
# in the meantime disabling it at the cost of excess memory usage
# if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only:
A = (
F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin'
)
A = os.path.join(snake_case__ , snake_case__ )
logger.info(F'Loading Optimizer state from {input_optimizer_file}' )
A = torch.load(snake_case__ )
logger.info(F'Optimizer state loaded from {input_optimizer_file}' )
else:
A = (
os.path.join(snake_case__ , F'{OPTIMIZER_NAME}_{optimizer_index}' )
if F'{OPTIMIZER_NAME}' not in input_dir
else input_dir
)
logger.info(F'Loading Optimizer from {ckpt_dir}' )
A = load_sharded_optimizer_state_dict(
model_state_dict=model.state_dict() , optimizer_key='optimizer' , storage_reader=dist_cp.FileSystemReader(snake_case__ ) , )
A = optim_state['optimizer']
logger.info(F'Optimizer loaded from {ckpt_dir}' )
A = FSDP.optim_state_dict_to_load(snake_case__ , snake_case__ , snake_case__ )
    optimizer.load_state_dict(snake_case__)
| 91 | 1 |
"""simple docstring"""
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / '''utils'''))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
SAMPLE_PROCESSOR_CONFIG = get_tests_dir("fixtures/dummy_feature_extractor_config.json")
SAMPLE_VOCAB = get_tests_dir("fixtures/vocab.json")
SAMPLE_PROCESSOR_CONFIG_DIR = get_tests_dir("fixtures")
class AutoProcessorTest(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]

    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[str] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(snake_case__ , snake_case__ )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_SCREAMING_SNAKE_CASE : Optional[int] = WavaVecaConfig()
_SCREAMING_SNAKE_CASE : Optional[int] = AutoProcessor.from_pretrained("facebook/wav2vec2-base-960h" )
# save in new folder
model_config.save_pretrained(snake_case__ )
processor.save_pretrained(snake_case__ )
_SCREAMING_SNAKE_CASE : Optional[Any] = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
# copy relevant files
copyfile(snake_case__ , os.path.join(snake_case__ , snake_case__ ) )
copyfile(snake_case__ , os.path.join(snake_case__ , "vocab.json" ) )
_SCREAMING_SNAKE_CASE : Tuple = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_SCREAMING_SNAKE_CASE : Optional[Any] = WavaVecaFeatureExtractor()
_SCREAMING_SNAKE_CASE : Tuple = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
_SCREAMING_SNAKE_CASE : Any = WavaVecaProcessor(snake_case__ , snake_case__ )
# save in new folder
processor.save_pretrained(snake_case__ )
# drop `processor_class` in tokenizer
with open(os.path.join(snake_case__ , snake_case__ ) , "r" ) as f:
_SCREAMING_SNAKE_CASE : Dict = json.load(snake_case__ )
config_dict.pop("processor_class" )
with open(os.path.join(snake_case__ , snake_case__ ) , "w" ) as f:
f.write(json.dumps(snake_case__ ) )
_SCREAMING_SNAKE_CASE : Dict = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_SCREAMING_SNAKE_CASE : Tuple = WavaVecaFeatureExtractor()
_SCREAMING_SNAKE_CASE : Tuple = AutoTokenizer.from_pretrained("facebook/wav2vec2-base-960h" )
_SCREAMING_SNAKE_CASE : List[str] = WavaVecaProcessor(snake_case__ , snake_case__ )
# save in new folder
processor.save_pretrained(snake_case__ )
# drop `processor_class` in feature extractor
with open(os.path.join(snake_case__ , snake_case__ ) , "r" ) as f:
_SCREAMING_SNAKE_CASE : Optional[Any] = json.load(snake_case__ )
config_dict.pop("processor_class" )
with open(os.path.join(snake_case__ , snake_case__ ) , "w" ) as f:
f.write(json.dumps(snake_case__ ) )
_SCREAMING_SNAKE_CASE : List[Any] = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmpdirname:
_SCREAMING_SNAKE_CASE : List[str] = WavaVecaConfig(processor_class="Wav2Vec2Processor" )
model_config.save_pretrained(snake_case__ )
# copy relevant files
copyfile(snake_case__ , os.path.join(snake_case__ , "vocab.json" ) )
# create emtpy sample processor
with open(os.path.join(snake_case__ , snake_case__ ) , "w" ) as f:
f.write("{}" )
_SCREAMING_SNAKE_CASE : Optional[int] = AutoProcessor.from_pretrained(snake_case__ )
self.assertIsInstance(snake_case__ , snake_case__ )
def __SCREAMING_SNAKE_CASE ( self ):
"""simple docstring"""
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
        processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor", trust_remote_code=True)
        self.assertTrue(processor.special_attribute_present)
        self.assertEqual(processor.__class__.__name__, "NewProcessor")
        feature_extractor = processor.feature_extractor
        self.assertTrue(feature_extractor.special_attribute_present)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
        tokenizer = processor.tokenizer
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            # Test we can also load the slow version
            new_processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True, use_fast=False
            )
            new_tokenizer = new_processor.tokenizer
            self.assertTrue(new_tokenizer.special_attribute_present)
            self.assertEqual(new_tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
    def test_new_processor_registration(self):
"""simple docstring"""
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            AutoProcessor.register(CustomConfig, CustomProcessor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoProcessor.register(Wav2Vec2Config, Wav2Vec2Processor)
            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG)
            with tempfile.TemporaryDirectory() as tmp_dir:
                vocab_file = os.path.join(tmp_dir, "vocab.txt")
                with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                    vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
                tokenizer = CustomTokenizer(vocab_file)
                processor = CustomProcessor(feature_extractor, tokenizer)
            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(tmp_dir)
                new_processor = AutoProcessor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_processor, CustomProcessor)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_processor_conflict(self):
"""simple docstring"""
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            special_attribute_present = False

        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewProcessor(ProcessorMixin):
            feature_extractor_class = "AutoFeatureExtractor"
            tokenizer_class = "AutoTokenizer"
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoProcessor.register(CustomConfig, NewProcessor)
            # If remote code is not set, the default is to use local classes.
            processor = AutoProcessor.from_pretrained("hf-internal-testing/test_dynamic_processor")
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)
            # If remote code is disabled, we load the local ones.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=False
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertFalse(processor.special_attribute_present)
            self.assertFalse(processor.feature_extractor.special_attribute_present)
            self.assertFalse(processor.tokenizer.special_attribute_present)
            # If remote code is enabled, we load from the Hub.
            processor = AutoProcessor.from_pretrained(
                "hf-internal-testing/test_dynamic_processor", trust_remote_code=True
            )
            self.assertEqual(processor.__class__.__name__, "NewProcessor")
            self.assertTrue(processor.special_attribute_present)
            self.assertTrue(processor.feature_extractor.special_attribute_present)
            self.assertTrue(processor.tokenizer.special_attribute_present)
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
if CustomConfig in TOKENIZER_MAPPING._extra_content:
del TOKENIZER_MAPPING._extra_content[CustomConfig]
if CustomConfig in PROCESSOR_MAPPING._extra_content:
del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def test_auto_processor_creates_tokenizer(self):
"""simple docstring"""
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-bert")
        self.assertEqual(processor.__class__.__name__, "BertTokenizerFast")
    def test_auto_processor_creates_image_processor(self):
"""simple docstring"""
        processor = AutoProcessor.from_pretrained("hf-internal-testing/tiny-random-convnext")
        self.assertEqual(processor.__class__.__name__, "ConvNextImageProcessor")
@is_staging_test
class ProcessorPushToHubTester(unittest.TestCase):
    vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "bla", "blou"]
@classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
"""simple docstring"""
try:
delete_repo(token=cls._token , repo_id="test-processor" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-processor-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-processor" )
except HTTPError:
pass
    def test_push_to_hub(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)
        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor"), push_to_hub=True, use_auth_token=self._token
            )
            new_processor = Wav2Vec2Processor.from_pretrained(f"{USER}/test-processor")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())
    def test_push_to_hub_in_organization(self):
        processor = Wav2Vec2Processor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)
        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(tmp_dir, "test-processor-org"),
                push_to_hub=True,
                use_auth_token=self._token,
                organization="valid_org",
            )
            new_processor = Wav2Vec2Processor.from_pretrained("valid_org/test-processor-org")
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(v, getattr(new_processor.feature_extractor, k))
            self.assertDictEqual(new_processor.tokenizer.get_vocab(), processor.tokenizer.get_vocab())
    def test_push_to_hub_dynamic_processor(self):
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()
        feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_PROCESSOR_CONFIG_DIR)
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)
        processor = CustomProcessor(feature_extractor, tokenizer)
        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f"{USER}/test-dynamic-processor", token=self._token)
            repo = Repository(tmp_dir, clone_from=f"{USER}/test-dynamic-processor", token=self._token)
            processor.save_pretrained(tmp_dir)
# This has added the proper auto_map field to the feature extractor config
self.assertDictEqual(
processor.feature_extractor.auto_map , {
"AutoFeatureExtractor": "custom_feature_extraction.CustomFeatureExtractor",
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# This has added the proper auto_map field to the tokenizer config
with open(os.path.join(snake_case__ , "tokenizer_config.json" ) ) as f:
_SCREAMING_SNAKE_CASE : str = json.load(snake_case__ )
self.assertDictEqual(
tokenizer_config["auto_map"] , {
"AutoTokenizer": ["custom_tokenization.CustomTokenizer", None],
"AutoProcessor": "custom_processing.CustomProcessor",
} , )
# The code has been copied from fixtures
self.assertTrue(os.path.isfile(os.path.join(snake_case__ , "custom_feature_extraction.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(snake_case__ , "custom_tokenization.py" ) ) )
self.assertTrue(os.path.isfile(os.path.join(snake_case__ , "custom_processing.py" ) ) )
repo.push_to_hub()
            new_processor = AutoProcessor.from_pretrained(f"{USER}/test-dynamic-processor", trust_remote_code=True)
# Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
self.assertEqual(new_processor.__class__.__name__ , "CustomProcessor" )
| 295 |
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def pack_examples(tok, src_examples, tgt_examples, max_tokens=1024):
    # Greedily merge consecutive (src, tgt) pairs until one more pair would
    # push either side past the token budget, then start a new packed example.
    finished_src, finished_tgt = [], []
    sorted_examples = list(zip(src_examples, tgt_examples))
    new_src, new_tgt = sorted_examples[0]

    def is_too_big(text):
        return tok(text, return_tensors="pt").input_ids.shape[1] > max_tokens

    for src, tgt in tqdm(sorted_examples[1:]):
        cand_src = new_src + " " + src
        cand_tgt = new_tgt + " " + tgt
        if is_too_big(cand_src) or is_too_big(cand_tgt):  # can't fit, finalize example
            finished_src.append(new_src)
            finished_tgt.append(new_tgt)
            new_src, new_tgt = src, tgt
        else:  # can fit, keep adding
            new_src, new_tgt = cand_src, cand_tgt
    # cleanup
    if new_src:
        assert new_tgt
        finished_src.append(new_src)
        finished_tgt.append(new_tgt)
    return finished_src, finished_tgt


def pack_data_dir(tok, data_dir: Path, max_tokens, save_path):
    save_path = Path(save_path)
    save_path.mkdir(exist_ok=True)
    for split in ["train"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        src_docs = [x.rstrip() for x in Path(src_path).open().readlines()]
        tgt_docs = [x.rstrip() for x in Path(tgt_path).open().readlines()]
        packed_src, packed_tgt = pack_examples(tok, src_docs, tgt_docs, max_tokens)
        print(f"packed {split} split from {len(src_docs)} examples -> {len(packed_src)}.")
        Path(save_path / f"{split}.source").open("w").write("\n".join(packed_src))
        Path(save_path / f"{split}.target").open("w").write("\n".join(packed_tgt))
    for split in ["val", "test"]:
        src_path, tgt_path = data_dir / f"{split}.source", data_dir / f"{split}.target"
        shutil.copyfile(src_path, save_path / f"{split}.source")
        shutil.copyfile(tgt_path, save_path / f"{split}.target")


def packer_cli():
    parser = argparse.ArgumentParser()
    parser.add_argument("--tok_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("--max_seq_len", type=int, default=128)
    parser.add_argument("--data_dir", type=str)
    parser.add_argument("--save_path", type=str)
    args = parser.parse_args()
    tokenizer = AutoTokenizer.from_pretrained(args.tok_name)
    return pack_data_dir(tokenizer, Path(args.data_dir), args.max_seq_len, args.save_path)
if __name__ == "__main__":
packer_cli()
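# Illustrative CLI invocation (the script filename and paths are assumptions;
# the flag names come from packer_cli above):
#
#   python pack_dataset.py --tok_name facebook/bart-large-cnn \
#       --max_seq_len 1024 --data_dir ./cnn_dm --save_path ./cnn_dm_packed
#
# The script expects {train,val,test}.{source,target} files under --data_dir,
# packs the train split, and copies val/test through unchanged.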
| 295 | 1 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    guid: str
    words: List[str]
    labels: Optional[List[str]]
@dataclass
class InputFeatures:
    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None
class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"
class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
"""simple docstring"""
        label_map = {label: i for i, label in enumerate(label_list)}
        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))
            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)
                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when tokenizing just a space
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
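            # Concrete single-sequence trace (illustrative, not from the original file):
            #   tokens:      [CLS] eu rejects it [SEP]
            #   segment_ids:   0    0    0    0    0
            # i.e. a lone sentence uses only type id 0; type id 1 only appears for
            # the second segment of a pair.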
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length
if ex_index < 5:
logger.info("*** Example ***" )
logger.info("guid: %s" , example.guid )
logger.info("tokens: %s" , " ".join([str(A__ ) for x in tokens] ) )
logger.info("input_ids: %s" , " ".join([str(A__ ) for x in input_ids] ) )
logger.info("input_mask: %s" , " ".join([str(A__ ) for x in input_mask] ) )
logger.info("segment_ids: %s" , " ".join([str(A__ ) for x in segment_ids] ) )
logger.info("label_ids: %s" , " ".join([str(A__ ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None
            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
return features
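# A minimal driving sketch for the featurizer above (the sentence, label set
# and checkpoint are illustrative assumptions, not part of this module):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#   examples = [InputExample(guid="dev-1", words=["EU", "rejects", "it"],
#                            labels=["B-ORG", "O", "O"])]
#   feats = TokenClassificationTask.convert_examples_to_features(
#       examples, ["O", "B-ORG"], max_seq_length=16, tokenizer=tokenizer,
#       pad_token=tokenizer.pad_token_id)
#   # feats[0].input_ids has length 16; sub-word continuations and padding
#   # positions carry label id -100 so the loss ignores them.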
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
"""simple docstring"""
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length))
            )
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)
        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
"""simple docstring"""
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
UpperCAmelCase_: Tuple = tf.data.Dataset.from_generator(
A__ , ({"input_ids": tf.intaa, "attention_mask": tf.intaa}, tf.intaa) , (
{"input_ids": tf.TensorShape([None] ), "attention_mask": tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
UpperCAmelCase_: Optional[int] = tf.data.Dataset.from_generator(
A__ , ({"input_ids": tf.intaa, "attention_mask": tf.intaa, "token_type_ids": tf.intaa}, tf.intaa) , (
{
"input_ids": tf.TensorShape([None] ),
"attention_mask": tf.TensorShape([None] ),
"token_type_ids": tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset
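        # Illustrative consumption of the dataset built above (the batch size
        # is an assumption):
        #
        #   tf_ds = TFTokenClassificationDataset(...).get_dataset().batch(8)
        #   for batch_inputs, batch_label_ids in tf_ds.take(1):
        #       ...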
        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i] | 137 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class ConvNextImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 384}
        size = get_size_dict(size, default_to_square=False)
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: float,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"Size dictionary must contain 'shortest_edge' key. Got {size.keys()}")
        shortest_edge = size["shortest_edge"]
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct)
            resize_size = get_resize_output_image_size(image, size=resize_shortest_edge, default_to_square=False)
            image = resize(image=image, size=resize_size, resample=resample, data_format=data_format, **kwargs)
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image, size=(shortest_edge, shortest_edge), data_format=data_format, **kwargs)
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image, size=(shortest_edge, shortest_edge), resample=resample, data_format=data_format, **kwargs
            )
    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
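# Illustrative usage sketch (the constructor arguments and the zero image are
# assumptions for demonstration):
#
#   import numpy as np
#   processor = ConvNextImageProcessor(size={"shortest_edge": 224})
#   batch = processor.preprocess(images=np.zeros((3, 300, 400), dtype=np.uint8), return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 224, 224): the short side is resized
#   # to int(224 / (224 / 256)) = 256, then center-cropped back to 224.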
| 402 | 0 |
'''simple docstring'''
import math
def check_partition_perfect(positive_integer: int) -> bool:
    # A partition P(k) is "perfect" when the derived exponent is an integer,
    # i.e. when sqrt(4k + 1)/2 + 1/2 is an exact power of two.
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12_345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1
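# Worked check (illustrative): for k = 6, 4k + 1 = 25 is a perfect square, so a
# partition exists, but sqrt(25)/2 + 1/2 = 3 and log2(3) is not an integer, so
# P(6) is not perfect; for k = 12, sqrt(49)/2 + 1/2 = 4 and log2(4) = 2, so
# P(12) is perfect.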
if __name__ == "__main__":
print(f'''{solution() = }''')
| 483 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
def evaluate_postfix(postfix_notation: list) -> int:
    # Evaluate an expression in Reverse Polish Notation with a single stack.
    if not postfix_notation:
        return 0
    operations = {"+", "-", "*", "/"}
    stack: list[Any] = []
    for token in postfix_notation:
        if token in operations:
            b, a = stack.pop(), stack.pop()
            if token == "+":
                stack.append(a + b)
            elif token == "-":
                stack.append(a - b)
            elif token == "*":
                stack.append(a * b)
            else:
                # integer division truncated toward zero (C-style), not floored
                if a * b < 0 and a % b != 0:
                    stack.append(a // b + 1)
                else:
                    stack.append(a // b)
        else:
            stack.append(int(token))
    return stack.pop()
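# Examples (illustrative): "5 - (2 + 3)" in postfix is ["5", "2", "3", "+", "-"]:
#   evaluate_postfix(["5", "2", "3", "+", "-"])  # -> 0
# and division truncates toward zero, matching C semantics:
#   evaluate_postfix(["-7", "2", "/"])  # -> -3 rather than floor's -4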
if __name__ == "__main__":
import doctest
doctest.testmod()
| 483 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 229 |
'''simple docstring'''
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class MemoryTest(unittest.TestCase):
    def test_base(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
    def test_base_with_args(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])
    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])
    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])
    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])
    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
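# Typical usage sketch of the decorator exercised above (the body of the
# training function is an assumption):
#
#   @find_executable_batch_size(starting_batch_size=128)
#   def training_function(batch_size):
#       ...  # build dataloaders/models with `batch_size`
#
#   training_function()  # retries with half the batch size on each CUDA OOM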
| 229 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
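# For example (illustrative), with base_model=True and i = 0 the list maps
#   "module.blocks.0.norm1.weight" -> "encoder.layer.0.layernorm_before.weight"
# because the base-model branch above strips the leading "vit." prefix.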
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
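# Shape sketch of the split above (illustrative): the fused qkv weight is
# (3 * hidden_size, hidden_size); rows [0, h) become the query projection,
# [h, 2h) the key projection and [2h, 3h) the value projection, with the
# bias sliced identically.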
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    # drop the self-supervised projection head; ViTMSNModel has no counterpart for it
    ignore_keys = [
"module.fc.fc1.weight",
"module.fc.fc1.bias",
"module.fc.bn1.weight",
"module.fc.bn1.bias",
"module.fc.bn1.running_mean",
"module.fc.bn1.running_var",
"module.fc.bn1.num_batches_tracked",
"module.fc.fc2.weight",
"module.fc.fc2.bias",
"module.fc.bn2.weight",
"module.fc.bn2.bias",
"module.fc.bn2.running_mean",
"module.fc.bn2.running_var",
"module.fc.bn2.num_batches_tracked",
"module.fc.fc3.weight",
"module.fc.fc3.bias",
]
for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000
    # ImageNet-1k id -> label mapping hosted on the Hub
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
if "s16" in checkpoint_url:
_UpperCAmelCase : Union[str, Any] = 3_8_4
_UpperCAmelCase : Any = 1_5_3_6
_UpperCAmelCase : Optional[Any] = 6
elif "l16" in checkpoint_url:
_UpperCAmelCase : Optional[int] = 1_0_2_4
_UpperCAmelCase : Union[str, Any] = 4_0_9_6
_UpperCAmelCase : Any = 2_4
_UpperCAmelCase : Any = 1_6
_UpperCAmelCase : List[Any] = 0.1
elif "b4" in checkpoint_url:
_UpperCAmelCase : Optional[Any] = 4
elif "l7" in checkpoint_url:
_UpperCAmelCase : str = 7
_UpperCAmelCase : List[str] = 1_0_2_4
_UpperCAmelCase : int = 4_0_9_6
_UpperCAmelCase : List[str] = 2_4
_UpperCAmelCase : Any = 1_6
_UpperCAmelCase : Optional[int] = 0.1
    model = ViTMSNModel(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size)
    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)
    model.load_state_dict(state_dict)
    model.eval()
_UpperCAmelCase : Any = "http://images.cocodataset.org/val2017/000000039769.jpg"
_UpperCAmelCase : Any = Image.open(requests.get(_SCREAMING_SNAKE_CASE , stream=_SCREAMING_SNAKE_CASE ).raw )
_UpperCAmelCase : Optional[int] = ViTImageProcessor(
size=config.image_size , image_mean=_SCREAMING_SNAKE_CASE , image_std=_SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Union[str, Any] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" )
# forward pass
torch.manual_seed(2 )
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
_UpperCAmelCase : Dict = torch.tensor([[-1.0915, -1.4876, -1.1809]] )
elif "b16" in checkpoint_url:
_UpperCAmelCase : Union[str, Any] = torch.tensor([[14.2889, -18.9045, 11.7281]] )
elif "l16" in checkpoint_url:
_UpperCAmelCase : Dict = torch.tensor([[41.5028, -22.8681, 45.6475]] )
elif "b4" in checkpoint_url:
_UpperCAmelCase : Any = torch.tensor([[-4.3868, 5.2932, -0.4137]] )
else:
_UpperCAmelCase : List[Any] = torch.tensor([[-0.1792, -0.6465, 2.4263]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_SCREAMING_SNAKE_CASE )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
__lowerCamelCase = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
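# Illustrative invocation (the script filename and dump directory are assumptions):
#
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small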
| 328 |
| 328 | 1 |
'''simple docstring'''
def kinetic_energy(mass: float, velocity: float) -> float:
    # E_k = 1/2 · m · v²; taking abs(velocity) twice keeps the result
    # non-negative for either sign of the velocity.
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
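# Worked example (illustrative): kinetic_energy(10, 10) == 0.5 * 10 * 10 * 10 == 500.0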
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 92 |
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        embedding_size=16,
        hidden_size=36,
        num_hidden_layers=6,
        num_hidden_groups=6,
        num_attention_heads=6,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = AlbertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))
def lowerCamelCase_ ( self : Optional[Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Any , UpperCAmelCase__ : Dict , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple ):
'''simple docstring'''
lowercase : Tuple =AlbertForPreTraining(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : int =model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , sentence_order_label=UpperCAmelCase__ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def lowerCamelCase_ ( self : List[Any] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int ):
'''simple docstring'''
lowercase : Tuple =AlbertForMaskedLM(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : str =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self : Any , UpperCAmelCase__ : int , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : str , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : List[str] =AlbertForQuestionAnswering(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : List[str] =model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , start_positions=UpperCAmelCase__ , end_positions=UpperCAmelCase__ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : List[Any] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
lowercase : Optional[Any] =self.num_labels
lowercase : Any =AlbertForSequenceClassification(UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : Dict =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Optional[Any] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , UpperCAmelCase__ : str , UpperCAmelCase__ : Dict ):
'''simple docstring'''
lowercase : List[Any] =self.num_labels
lowercase : str =AlbertForTokenClassification(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : int =model(UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self : List[str] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : Union[str, Any] , UpperCAmelCase__ : Dict , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : Tuple , UpperCAmelCase__ : int , UpperCAmelCase__ : List[str] ):
'''simple docstring'''
lowercase : Optional[int] =self.num_choices
lowercase : List[Any] =AlbertForMultipleChoice(config=UpperCAmelCase__ )
model.to(UpperCAmelCase__ )
model.eval()
lowercase : List[Any] =input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : int =token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : int =input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : List[str] =model(
UpperCAmelCase__ , attention_mask=UpperCAmelCase__ , token_type_ids=UpperCAmelCase__ , labels=UpperCAmelCase__ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": AlbertModel,
            "fill-mask": AlbertForMaskedLM,
            "question-answering": AlbertForQuestionAnswering,
            "text-classification": AlbertForSequenceClassification,
            "token-classification": AlbertForTokenClassification,
            "zero-shot": AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device
                )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = AlbertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AlbertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class AlbertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = AlbertModel.from_pretrained("albert-base-v2")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4))
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset"
        )
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            "https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4",
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {"score": ANY(float), "label": ANY(str)},
                    {"score": ANY(float), "label": ANY(str)},
                ],
            )

    @require_torch
    def test_small_model_pt(self):
        small_model = "hf-internal-testing/tiny-random-VideoMAEForVideoClassification"
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={"shortest_edge": 10}, crop_size={"height": 10, "width": 10}
        )
        video_classifier = pipeline(
            "video-classification", model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4
        )
        video_file_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
        )

        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
                [{"score": 0.5199, "label": "LABEL_0"}, {"score": 0.4801, "label": "LABEL_1"}],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        pass


import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TextGenerationPipeline(Pipeline):
    XL_PREFIX = "\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n "

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.model.config.prefix is not None:
                prefix = self.model.config.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
    def _sanitize_parameters(self, return_full_text=None, return_tensors=None, return_text=None, return_type=None, clean_up_tokenization_spaces=None, prefix=None, handle_long_generation=None, stop_sequence=None, **generate_kwargs):
        preprocess_params = {}
        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=False, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def _parse_and_tokenize(self, *args, **kwargs):
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        return super().__call__(text_inputs, **kwargs)
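
    # A hedged usage sketch (the checkpoint name below is an illustrative example,
    # not something this file depends on):
    #
    #     from transformers import pipeline
    #     generator = pipeline("text-generation", model="gpt2")
    #     generator("Hello, I'm a language model,", max_new_tokens=20, return_full_text=False)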
    def preprocess(self, prompt_text, prefix="", handle_long_generation=None, **generate_kwargs):
        inputs = self.tokenizer(
            prefix + prompt_text, padding=False, add_special_tokens=False, return_tensors=self.framework
        )
        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.model.config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation the number of desired tokens exceeds the"
                        " models max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs
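
    # Minimal standalone sketch (an illustration, not part of the pipeline API) of the
    # "hole" strategy above: keep only the rightmost prompt tokens so that the prompt
    # plus the generated tokens fit inside the model window. The helper name is
    # hypothetical.
    #
    #     def left_truncate_for_generation(input_ids, max_new_tokens, model_max_length):
    #         keep_length = model_max_length - max_new_tokens
    #         if keep_length <= 0:
    #             raise ValueError("max_new_tokens leaves no room for prompt tokens")
    #         return input_ids[:, -keep_length:]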
    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.model.config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # BS x SL
        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
    def postprocess(self, model_outputs, return_type=ReturnType.FULL_TEXT, clean_up_tokenization_spaces=True):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence, skip_special_tokens=True, clean_up_tokenization_spaces=clean_up_tokenization_spaces
                )

                # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                if return_type == ReturnType.FULL_TEXT:
                    all_text = prompt_text + text[prompt_length:]
                else:
                    all_text = text[prompt_length:]

                record = {"generated_text": all_text}
            records.append(record)

        return records
from __future__ import annotations


def extended_euclid(a: int, b: int) -> tuple[int, int]:
    """Extended Euclidean algorithm: returns (x, y) with a * x + b * y == gcd(a, b)."""
    if b == 0:
        return (1, 0)
    (x, y) = extended_euclid(b, a % b)
    k = a // b
    return (y, x - k * y)
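
# Worked example (verified by hand): extended_euclid(10, 6) == (-1, 2),
# since 10 * (-1) + 6 * 2 == 2 == gcd(10, 6).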
def chinese_remainder_theorem(n1: int, r1: int, n2: int, r2: int) -> int:
    """Solve n ≡ r1 (mod n1) and n ≡ r2 (mod n2) via the extended Euclidean algorithm."""
    (x, y) = extended_euclid(n1, n2)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


def invert_modulo(a: int, n: int) -> int:
    """Multiplicative inverse of a modulo n (a and n must be coprime)."""
    (b, x) = extended_euclid(a, n)
    if b < 0:
        b = (b % n + n) % n
    return b


def chinese_remainder_theorem2(n1: int, r1: int, n2: int, r2: int) -> int:
    """Same as chinese_remainder_theorem, but built on modular inverses."""
    x, y = invert_modulo(n1, n2), invert_modulo(n2, n1)
    m = n1 * n2
    n = r2 * x * n1 + r1 * y * n2
    return (n % m + m) % m


if __name__ == "__main__":
    from doctest import testmod

    testmod(name="chinese_remainder_theorem", verbose=True)
    testmod(name="chinese_remainder_theorem2", verbose=True)
    testmod(name="invert_modulo", verbose=True)
    testmod(name="extended_euclid", verbose=True)
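
    # Small sanity check (verified by hand): n ≡ 1 (mod 5) and n ≡ 3 (mod 7)
    # gives n = 31, and both implementations must agree on it.
    assert chinese_remainder_theorem(5, 1, 7, 3) == chinese_remainder_theorem2(5, 1, 7, 3) == 31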
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin
@dataclass
class KarrasVeOutput(BaseOutput):
    """Output of a scheduler step: the previous (less noisy) sample, the derivative
    used for the step, and the model's estimate of the fully denoised sample."""

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None
class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """Stochastic sampling scheduler in the style of Karras et al. (2022),
    "Elucidating the Design Space of Diffusion-Based Generative Models"."""

    order = 2

    @register_to_config
    def __init__(self, sigma_min: float = 0.02, sigma_max: float = 100, s_noise: float = 1.007, s_churn: float = 80, s_min: float = 0.05, s_max: float = 50):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps: int = None
        self.timesteps: np.IntTensor = None
        self.schedule: torch.FloatTensor = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        # This scheduler does not rescale model inputs; kept for scheduler interchangeability.
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None):
        # Temporarily raise the noise level from sigma to sigma_hat (the "churn" step).
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.FloatTensor, return_dict: bool = True):
        # First-order (Euler) step from sigma_hat down to sigma_prev.
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(self, model_output: torch.FloatTensor, sigma_hat: float, sigma_prev: float, sample_hat: torch.FloatTensor, sample_prev: torch.FloatTensor, derivative: torch.FloatTensor, return_dict: bool = True):
        # Second-order (Heun) correction: average the derivatives at both endpoints.
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
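

# Hedged sketch of how the three hooks above compose into the stochastic sampler of
# Karras et al. (2022). The `unet` argument is an assumption for illustration: any
# callable(sample, sigma) returning an object with a `.sample` field will do, and real
# pipelines may additionally rescale the model inputs. Not part of the scheduler API.
def karras_ve_sample(unet, scheduler, shape, num_inference_steps=50, generator=None):
    scheduler.set_timesteps(num_inference_steps)
    sample = randn_tensor(shape, generator=generator) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        sigma = scheduler.schedule[t]
        sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0

        # 1. churn the noise level up, 2. Euler step from sigma_hat down to sigma_prev
        sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma, generator=generator)
        model_output = unet(sample_hat, sigma_hat).sample
        step_output = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

        # 3. second-order (Heun) correction, skipped at the final step where sigma_prev == 0
        if sigma_prev != 0:
            model_output = unet(step_output.prev_sample, sigma_prev).sample
            step_output = scheduler.step_correct(
                model_output, sigma_hat, sigma_prev, sample_hat,
                step_output.prev_sample, step_output.derivative,
            )
        sample = step_output.prev_sample
    return sample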
"""simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True

    @classmethod
    def from_encoder_decoder_configs(cls, encoder_config, decoder_config, **kwargs) -> PretrainedConfig:
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
"""simple docstring"""
__UpperCAmelCase = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(10_00_00)]
def _snake_case ( lowercase__ : int ) -> int:
'''simple docstring'''
lowerCAmelCase_ :str = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0]
number //= 1_0_0_0_0_0
return sum_of_digits_squared
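
# Worked example: next_number(89) == 8**2 + 9**2 == 145, and
# 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89 is the loop that every
# chain not reaching 1 eventually enters.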
# There are two chains:
# one ends with 89, and its member 58 is the one which, when declared first,
# yields the fewest iterations needed to check all members;
# the other ends with 1 and has only one element, 1.
# So 58 and 1 are chosen to be declared at the start.
# Changed dictionary to an array to quicken the solution
CHAINS = [None] * 10000000
CHAINS[0] = True  # the chain of 1 ends in 1
CHAINS[57] = False  # the chain of 58 ends in 89


def chain(number: int) -> bool:
    """Return True if the chain of `number` ends in 1, False if it ends in 89,
    memoizing every number along the way (and its multiples of 10)."""
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10000000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10000000) -> int:
    """Count how many starting numbers below `number` arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(f"{solution() = }")
import unittest
from transformers import AutoTokenizer, FalconConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
)
class FalconModelTester:
    def __init__(self, parent, batch_size=3, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return FalconConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, pad_token_id=1, new_decoder_architecture=True)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = FalconModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = FalconModel(config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
        )
        result = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
        )
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_causal_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.is_decoder = True
        config.add_cross_attention = True
        model = FalconForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(
            input_ids,
            attention_mask=input_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=True,
        )
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(
            next_input_ids,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_hidden_states=True,
        )["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class FalconModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FalconModel,
            FalconForCausalLM,
            FalconForSequenceClassification,
            FalconForTokenClassification,
            FalconForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (FalconForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": FalconModel,
            "text-classification": FalconForSequenceClassification,
            "text-generation": FalconForCausalLM,
            "question-answering": FalconForQuestionAnswering,
            "token-classification": FalconForTokenClassification,
            "zero-shot": FalconForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_headmasking = False
    test_pruning = False
    def setUp(self):
        self.model_tester = FalconModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FalconConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_position_embedding_types(self):
        config, *inputs = self.model_tester.prepare_config_and_inputs()
        for alibi in [True, False]:
            config.alibi = alibi
            self.model_tester.create_and_check_model(config, *inputs)
    def test_falcon_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_falcon_sequence_classification_model_for_single_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "single_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_rw_cache_conversion(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        input_ids = input_dict["input_ids"]
        model = FalconForCausalLM(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, use_cache=True)
        batch_size = input_ids.shape[0]
        rw_cache = model._convert_to_rw_cache(result.past_key_values)
        standard_cache = model._convert_cache_to_standard_format(rw_cache, batch_size)
        for layer in range(len(rw_cache)):
            for tensor_idx in range(2):
                self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3)
                self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4)
                self.assertTrue(
                    torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx])
                )
    def test_falcon_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = FalconForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))
    def test_past_key_values_format(self):
        for model_class in self.all_generative_model_classes:
            config, inputs = self.model_tester.prepare_config_and_inputs_for_common()

            # If it doesn't support cache, pass the test
            if not hasattr(config, "use_cache"):
                return

            model = model_class(config).to(torch_device)
            if "use_cache" not in inputs:
                inputs["use_cache"] = True
            outputs = model(**inputs)

            # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format)
            if "past_key_values" not in outputs:
                return

            num_hidden_layers = (
                getattr(config, "decoder_layers", None)
                or getattr(config, "num_decoder_layers", None)
                or config.num_hidden_layers
            )
            num_attention_heads = getattr(config, "num_kv_heads", config.num_attention_heads)
            embed_dim = getattr(config, "d_model", config.hidden_size)
            per_head_embed_dim = embed_dim // num_attention_heads

            past_kv = outputs["past_key_values"]
            self.assertEqual(len(past_kv), num_hidden_layers)

            batch_size, seq_length = inputs["input_ids"].shape
            for i in range(num_hidden_layers):
                if config.new_decoder_architecture:
                    num_attention_heads = config.num_attention_heads
                elif config.multi_query:
                    num_attention_heads = 1
                self.assertEqual(len(past_kv[0]), 2)  # K V for the decoder = 2
                self.assertEqual(
                    past_kv[i][0].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
                self.assertEqual(
                    past_kv[i][1].shape, (batch_size, num_attention_heads, seq_length, per_head_embed_dim)
                )
@require_torch
class FalconLanguageGenerationTest(unittest.TestCase):
    @slow
    def test_lm_generate_falcon(self):
        tokenizer = AutoTokenizer.from_pretrained("Rocketknight1/falcon-rw-1b")
        model = FalconForCausalLM.from_pretrained("Rocketknight1/falcon-rw-1b")
        model.eval()
        model.to(torch_device)
        inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

        EXPECTED_OUTPUT = (
            "My favorite food is pizza. I love it so much that I have a pizza party every year for my birthday."
        )

        output_ids = model.generate(**inputs, do_sample=False, max_new_tokens=19)
        output_str = tokenizer.batch_decode(output_ids)[0]

        self.assertEqual(output_str, EXPECTED_OUTPUT)

    @slow
    def test_lm_generation_big_models(self):
        for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]:
            tokenizer = AutoTokenizer.from_pretrained(repo)
            model = FalconForCausalLM.from_pretrained(repo)
            model.eval()
            model.to(torch_device)
            inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

            # We just test that these run without errors - the models are randomly initialized
            # and so the actual text outputs will be garbage
            model.generate(**inputs, do_sample=False, max_new_tokens=4)
            model.generate(**inputs, do_sample=True, max_new_tokens=4)
            model.generate(**inputs, num_beams=2, max_new_tokens=4)

    @slow
    def test_lm_generation_use_cache(self):
        with torch.no_grad():
            for repo in [
                "Rocketknight1/falcon-rw-1b",
                "Rocketknight1/tiny-random-falcon-7b",
                "Rocketknight1/tiny-random-falcon-40b",
            ]:
                tokenizer = AutoTokenizer.from_pretrained(repo)
                model = FalconForCausalLM.from_pretrained(repo)
                model.eval()
                model.to(device=torch_device)
                inputs = tokenizer("My favorite food is", return_tensors="pt").to(torch_device)

                # Test results are the same with and without cache
                outputs_no_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=False)
                outputs_cache = model.generate(**inputs, do_sample=False, max_new_tokens=20, use_cache=True)
                self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0)
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check that `n` can be placed at (row, column): it must not already appear
    in the same row, the same column, or the same 3x3 subgrid."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False

    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False

    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Find the first empty cell (marked 0), scanning row by row."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve the grid by backtracking: try each digit in the first empty cell
    and recurse; undo the assignment whenever the recursion dead-ends."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None


def print_solution(grid: Matrix) -> None:
    """Print the grid row by row."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
lowerCAmelCase : Any = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
def set_bit(number: int, position: int) -> int:
    """Set (turn on) the bit at `position` in `number`."""
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    """Clear (turn off) the bit at `position` in `number`."""
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    """Flip (toggle) the bit at `position` in `number`."""
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    """Return True if the bit at `position` is set."""
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    """Return the bit at `position` as 0 or 1."""
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
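
    # A few sanity checks on 0b1101 (= 13); bit positions count from the
    # least-significant bit.
    assert set_bit(0b1101, 1) == 0b1111
    assert clear_bit(0b1101, 2) == 0b1001
    assert flip_bit(0b1101, 1) == 0b1111
    assert is_bit_set(0b1101, 3) is True
    assert get_bit(0b1101, 1) == 0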
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, scope=None, encoder_stride=2):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = self.num_patches + 2
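        # With the defaults above: (30 // 2) ** 2 = 225 patches, so seq_length = 227.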
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride)

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]

        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFDeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE_ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE_ = {'vocab_file': 'spiece.model'}
SCREAMING_SNAKE_CASE_ = {
'vocab_file': {
'AI-Sweden/gpt-sw3-126m': 'https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-350m': 'https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-1.6b': 'https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-6.7b': 'https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model',
'AI-Sweden/gpt-sw3-20b': 'https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model',
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'AI-Sweden/gpt-sw3-126m': 2048,
'AI-Sweden/gpt-sw3-350m': 2048,
'AI-Sweden/gpt-sw3-1.6b': 2048,
'AI-Sweden/gpt-sw3-6.7b': 2048,
'AI-Sweden/gpt-sw3-20b': 2048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b;"
                " if you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts (various unicode space characters)
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)
    def preprocess_text(self, text: str) -> str:
        # Remove non-printing characters
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text
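
    # Illustrative note (not from the original file): preprocessing does three
    # things -- (1) strip non-printing control characters via the regex compiled
    # in __init__, (2) map exotic unicode spaces to a plain " ", and (3) apply
    # NFC normalization, e.g.
    #
    #     unicodedata.normalize("NFC", "cafe\u0301")  # -> "café"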
    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; overridden to disable the default cleanup."""
        return out_string
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "

                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)

        return out_string
    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        return self.sp_model.decode(token_ids)
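
    # Illustrative note (not from the original file): `encode_fast`/`decode_fast`
    # bypass the added-token machinery and talk to SentencePiece directly. A
    # hedged usage sketch (the checkpoint name is taken from the map above):
    #
    #     tok = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
    #     ids = tok.encode_fast("Träd är fina", return_tensors="pt")
    #     text = tok.decode_fast(ids.tolist())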
    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
| 34 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class NystromformerModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True,
        use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu",
        hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02,
        num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=False, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False
    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
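

# Illustrative sketch (not part of the original test file): the masked-LM check
# below selects the highest-scoring vocabulary id at the [MASK] position. That
# selection step is an argmax over the vocabulary axis, shown with synthetic logits:
if __name__ == "__main__":
    _demo_logits = torch.zeros(1, 6, 10)  # (batch, seq_len, vocab_size)
    _demo_logits[0, 2, 7] = 5.0           # make vocabulary id 7 win at position 2
    assert _demo_logits[:, 2, :].argmax(-1)[0].item() == 7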
@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
@slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"
        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
        encoding = tokenizer(sentence, return_tensors="pt")
        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits
        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital")
| 407 | 0 |
'''simple docstring'''
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.t5.modeling_t5 import T5Block, T5Config, T5LayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(
        self, max_length: int, vocab_size: int, d_model: int, dropout_rate: float,
        num_layers: int, num_heads: int, d_kv: int, d_ff: int, feed_forward_proj: str,
        is_decoder: bool = False,
    ):
        super().__init__()

        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)

        t5config = T5Config(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff,
            dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj,
            is_decoder=is_decoder, is_encoder_decoder=False,
        )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            self.encoders.append(T5Block(t5config))

        self.layer_norm = T5LayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)

        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)

        # invert the attention mask so that padding positions are masked out
        input_shape = encoder_input_tokens.size()
        attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)

        for lyr in self.encoders:
            x = lyr(x, attention_mask)[0]
        x = self.layer_norm(x)

        return self.dropout_post(x), encoder_inputs_mask
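

# Illustrative sketch (not part of the original module): a tiny instantiation of
# the encoder above. The hyperparameters are made up for demonstration, and the
# sketch assumes compatible `diffusers`/`transformers` versions are installed.
if __name__ == "__main__":
    encoder = SpectrogramNotesEncoder(
        max_length=64, vocab_size=100, d_model=32, dropout_rate=0.1,
        num_layers=1, num_heads=2, d_kv=16, d_ff=64,
        feed_forward_proj="gated-gelu", is_decoder=False,
    )
    tokens = torch.randint(0, 100, (1, 8))
    mask = torch.ones(1, 8, dtype=torch.long)
    hidden, out_mask = encoder(tokens, mask)
    print(hidden.shape)  # expected: torch.Size([1, 8, 32])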
| 273 |
'''simple docstring'''
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]):
        self.vertices: set[int] = vertices
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int) -> None:
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> "Graph":
        subgraph: Graph = Graph({min(self.vertices)}, {})

        min_edge: EdgeT
        min_weight: int
        edge: EdgeT
        weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight

            subgraph.add_edge(min_edge, min_weight)

        return subgraph


def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    filepath: str = os.path.join(script_dir, filename)

    edges: dict[EdgeT, int] = {}
    data: list[str]

    with open(filepath) as f:
        data = f.read().strip().split("\n")
    adjacency_matrix = [line.split(",") for line in data]

    for edge1 in range(1, len(adjacency_matrix)):
        for edge2 in range(edge1):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2])

    graph: Graph = Graph(set(range(len(adjacency_matrix))), edges)

    subgraph: Graph = graph.prims_algorithm()

    initial_total: int = sum(graph.edges.values())
    optimal_total: int = sum(subgraph.edges.values())

    return initial_total - optimal_total
if __name__ == "__main__":
print(F'{solution() = }')
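
    # Illustrative worked example (not in the original solution): a triangle
    # plus one heavy edge. Prim's algorithm keeps the two cheapest edges that
    # still connect all three vertices.
    demo = Graph({0, 1, 2}, {(0, 1): 1, (1, 2): 2, (0, 2): 10})
    mst = demo.prims_algorithm()
    assert sum(mst.edges.values()) == 3  # edges (0, 1) and (1, 2) survive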
| 273 | 1 |
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt16-en-de-dist-12-1": [28.3, 27.52],
        "wmt16-en-de-dist-6-1": [27.4, 27.11],
        "wmt16-en-de-12-1": [26.9, 25.75],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don\'t use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
'''
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
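
# Illustrative note (not part of the original script): the generator above is
# plain f-string templating -- interpolate the metadata into a Markdown skeleton
# and write it to disk. In miniature:
#
#     name = "demo-model"
#     card = f"# {name}\n\nfairseq BLEU: {scores[name][0]} / transformers BLEU: {scores[name][1]}\n"
#     (model_cards_dir / "allenai" / name).mkdir(parents=True, exist_ok=True)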
| 624 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)
    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10

    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)
    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
        # schedulers dict format
        # function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1e-7},
                [0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
                [0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
        for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1,
                expected_learning_rates,
                tol=1e-2,
                msg=f"failed for {scheduler_func} in normal scheduler",
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas))
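

# Illustrative sketch (not part of the original test file): what the helpers
# above exercise, in miniature -- attach a warmup + linear-decay schedule to an
# optimizer and step both together.
if __name__ == "__main__":
    if is_torch_available():
        param = torch.nn.Parameter(torch.zeros(1))
        demo_optimizer = torch.optim.AdamW([param], lr=10.0)
        demo_scheduler = get_linear_schedule_with_warmup(
            demo_optimizer, num_warmup_steps=2, num_training_steps=10
        )
        lrs = []
        for _ in range(4):
            demo_optimizer.step()
            demo_scheduler.step()
            lrs.append(demo_scheduler.get_last_lr()[0])
        print(lrs)  # ramps up over 2 warmup steps, then decays linearly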
| 624 | 1 |
"""simple docstring"""
# flake8: noqa
# Lint as: python3
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
logger = logging.get_logger(__name__)

_FORMAT_TYPES: Dict[Optional[str], Type[Formatter]] = {}
_FORMAT_TYPES_ALIASES: Dict[Optional[str], str] = {}
_FORMAT_TYPES_ALIASES_UNAVAILABLE: Dict[Optional[str], Exception] = {}
def _register_formatter(
    formatter_cls: Type[Formatter],
    format_type: Optional[str],
    aliases: Optional[List[str]] = None,
) -> None:
    """Register a Formatter class using a name and optional aliases."""
    aliases = aliases if aliases is not None else []
    if format_type in _FORMAT_TYPES:
        logger.warning(
            f"Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})"
        )
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(aliases + [format_type]):
        if alias in _FORMAT_TYPES_ALIASES:
            logger.warning(
                f"Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})"
            )
        _FORMAT_TYPES_ALIASES[alias] = format_type


def _register_unavailable_formatter(
    unavailable_error: Exception, format_type: Optional[str], aliases: Optional[List[str]] = None
) -> None:
    aliases = aliases if aliases is not None else []
    for alias in set(aliases + [format_type]):
        _FORMAT_TYPES_ALIASES_UNAVAILABLE[alias] = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=["""python"""])
_register_formatter(ArrowFormatter, """arrow""", aliases=["""pa""", """pyarrow"""])
_register_formatter(NumpyFormatter, """numpy""", aliases=["""np"""])
_register_formatter(PandasFormatter, """pandas""", aliases=["""pd"""])
_register_formatter(CustomFormatter, """custom""")
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, """torch""", aliases=["""pt""", """pytorch"""])
else:
    _torch_error = ValueError("PyTorch needs to be installed to be able to return PyTorch tensors.")
_register_unavailable_formatter(_torch_error, """torch""", aliases=["""pt""", """pytorch"""])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, """tensorflow""", aliases=["""tf"""])
else:
    _tf_error = ValueError("Tensorflow needs to be installed to be able to return Tensorflow tensors.")
_register_unavailable_formatter(_tf_error, """tensorflow""", aliases=["""tf"""])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, """jax""", aliases=[])
else:
    _jax_error = ValueError("JAX needs to be installed to be able to return JAX arrays.")
_register_unavailable_formatter(_jax_error, """jax""", aliases=[])
def get_format_type_from_alias(format_type: Optional[str]) -> Optional[str]:
    """If the given format type is a known alias, return its main type name; otherwise return the type unchanged."""
    if format_type in _FORMAT_TYPES_ALIASES:
        return _FORMAT_TYPES_ALIASES[format_type]
    else:
        return format_type


def get_formatter(format_type: Optional[str], **format_kwargs) -> Formatter:
    """Factory function to get a Formatter given its type name and keyword arguments."""
    format_type = get_format_type_from_alias(format_type)
    if format_type in _FORMAT_TYPES:
        return _FORMAT_TYPES[format_type](**format_kwargs)
    if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
        raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
    else:
        raise ValueError(
            f"Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None)}, but got '{format_type}'"
        )
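

# Illustrative sketch (not part of the original module): resolving a formatter
# by type name or alias registered above.
if __name__ == "__main__":
    formatter = get_formatter("np")  # the "np" alias resolves to NumpyFormatter
    print(type(formatter).__name__)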
| 556 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None
@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
    class HansDataset(Dataset):
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()

            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}_{}".format(
                    "dev" if evaluate else "train",
                    tokenizer.__class__.__name__,
                    str(max_seq_length),
                    task,
                ),
            )
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = (
                        processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
                    )

                    logger.info("Training examples: %s", len(examples))
                    self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)
                    logger.info("Saving features into cached file %s", cached_features_file)
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
if is_tf_available():
import tensorflow as tf
    class TFHansDataset:
        features: List[InputFeatures]

        def __init__(
            self,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            task: str,
            max_seq_length: Optional[int] = 128,
            overwrite_cache=False,
            evaluate: bool = False,
        ):
            processor = hans_processors[task]()
            label_list = processor.get_labels()
            if tokenizer.__class__ in (
                RobertaTokenizer,
                RobertaTokenizerFast,
                XLMRobertaTokenizer,
                BartTokenizer,
                BartTokenizerFast,
            ):
                # HACK(label indices are swapped in RoBERTa pretrained model)
                label_list[1], label_list[2] = label_list[2], label_list[1]
            self.label_list = label_list

            examples = processor.get_dev_examples(data_dir) if evaluate else processor.get_train_examples(data_dir)
            self.features = hans_convert_examples_to_features(examples, label_list, max_seq_length, tokenizer)

            def gen():
                for ex_index, ex in tqdm.tqdm(enumerate(self.features), desc="convert examples to features"):
                    if ex_index % 10_000 == 0:
                        logger.info("Writing example %d of %d" % (ex_index, len(examples)))

                    yield (
                        {
                            "example_id": 0,
                            "input_ids": ex.input_ids,
                            "attention_mask": ex.attention_mask,
                            "token_type_ids": ex.token_type_ids,
                        },
                        ex.label,
                    )
            self.dataset = tf.data.Dataset.from_generator(
                gen,
                (
                    {
                        "example_id": tf.int32,
                        "input_ids": tf.int32,
                        "attention_mask": tf.int32,
                        "token_type_ids": tf.int32,
                    },
                    tf.int64,
                ),
                (
                    {
                        "example_id": tf.TensorShape([]),
                        "input_ids": tf.TensorShape([None, None]),
                        "attention_mask": tf.TensorShape([None, None]),
                        "token_type_ids": tf.TensorShape([None, None]),
                    },
                    tf.TensorShape([]),
                ),
            )
        def get_dataset(self):
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]

        def get_labels(self):
            return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample], label_list: List[str], max_length: int, tokenizer: PreTrainedTokenizer
):
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10_000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a, example.text_b, add_special_tokens=True, max_length=max_length,
            padding="max_length", truncation=True, return_overflowing_tokens=True,
        )
        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)
        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
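

# Illustrative sketch (not part of the original file): the label-mapping step
# used by `hans_convert_examples_to_features`, on its own.
if __name__ == "__main__":
    demo_labels = HansProcessor().get_labels()
    demo_map = {label: i for i, label in enumerate(demo_labels)}
    assert demo_map == {"contradiction": 0, "entailment": 1, "neutral": 2}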
| 556 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'configuration_speech_to_text': ['SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Speech2TextConfig'],
'processing_speech_to_text': ['Speech2TextProcessor'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
'TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFSpeech2TextForConditionalGeneration',
'TFSpeech2TextModel',
'TFSpeech2TextPreTrainedModel',
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
'SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'Speech2TextForConditionalGeneration',
'Speech2TextModel',
'Speech2TextPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_speech_to_text import Speech2TextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 448 |
from math import factorial
def combinations(n: int, k: int) -> int:
    # If either condition is true, the function is being asked to
    # calculate a factorial of a negative number, which is not possible
    if n < k or k < 0:
        raise ValueError("Please enter positive integers for n and k where n >= k")
    return factorial(n) // (factorial(k) * factorial(n - k))
if __name__ == "__main__":
print(
'The number of five-card hands possible from a standard',
        f'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
'If a class of 40 students must be arranged into groups of',
        f'''4 for group projects, there are {combinations(40, 4)} ways''',
'to arrange them.\n',
)
print(
'If 10 teams are competing in a Formula One race, there',
        f'''are {combinations(10, 3)} ways that first, second and''',
'third place can be awarded.',
)
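

if __name__ == "__main__":
    # Illustrative sanity checks (not in the original snippet): spot-check the
    # closed form C(n, k) = n! / (k! * (n - k)!) against known values.
    assert combinations(52, 5) == 2_598_960
    assert combinations(40, 4) == 91_390
    assert combinations(10, 3) == 120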
| 64 | 0 |
from dataclasses import dataclass, field
from typing import Optional
from transformers import AutoConfig, AutoImageProcessor, AutoTokenizer, FlaxVisionEncoderDecoderModel, HfArgumentParser
@dataclass
class ModelArguments:
    output_dir: str = field(
        metadata={"help": "The output directory where the model will be written."},
    )
    encoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The encoder model checkpoint for weights initialization. "
                "Don't set if you want to train an encoder model from scratch."
            )
        },
    )
    decoder_model_name_or_path: str = field(
        metadata={
            "help": (
                "The decoder model checkpoint for weights initialization. "
                "Don't set if you want to train a decoder model from scratch."
            )
        },
    )
    encoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained encoder config name or path if not the same as encoder_model_name"}
    )
    decoder_config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained decoder config name or path if not the same as decoder_model_name"}
    )


def main():
    parser = HfArgumentParser((ModelArguments,))
    (model_args,) = parser.parse_args_into_dataclasses()

    # Load pretrained model and tokenizer

    # Use explicit specified encoder config
    if model_args.encoder_config_name:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_config_name)
    # Use pretrained encoder model's config
    else:
        encoder_config = AutoConfig.from_pretrained(model_args.encoder_model_name_or_path)

    # Use explicit specified decoder config
    if model_args.decoder_config_name:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_config_name)
    # Use pretrained decoder model's config
    else:
        decoder_config = AutoConfig.from_pretrained(model_args.decoder_model_name_or_path)

    # necessary for `from_encoder_decoder_pretrained` when `decoder_config` is passed
    decoder_config.is_decoder = True
    decoder_config.add_cross_attention = True

    model = FlaxVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
        encoder_pretrained_model_name_or_path=model_args.encoder_model_name_or_path,
        decoder_pretrained_model_name_or_path=model_args.decoder_model_name_or_path,
        encoder_config=encoder_config,
        decoder_config=decoder_config,
    )

    # GPT2 only has bos/eos tokens but not decoder_start/pad tokens
    decoder_start_token_id = decoder_config.decoder_start_token_id
    pad_token_id = decoder_config.pad_token_id
    if decoder_start_token_id is None:
        decoder_start_token_id = decoder_config.bos_token_id
    if pad_token_id is None:
        pad_token_id = decoder_config.eos_token_id

    # This is necessary to make Flax's generate() work
    model.config.eos_token_id = decoder_config.eos_token_id
    model.config.decoder_start_token_id = decoder_start_token_id
    model.config.pad_token_id = pad_token_id

    image_processor = AutoImageProcessor.from_pretrained(model_args.encoder_model_name_or_path)

    tokenizer = AutoTokenizer.from_pretrained(model_args.decoder_model_name_or_path)
    tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)

    model.save_pretrained(model_args.output_dir)
    image_processor.save_pretrained(model_args.output_dir)
    tokenizer.save_pretrained(model_args.output_dir)
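

# Illustrative invocation (not part of the original script); the script file
# name and checkpoint names below are examples only:
#
#   python create_model.py \
#       --output_dir ./vit-gpt2 \
#       --encoder_model_name_or_path google/vit-base-patch16-224-in21k \
#       --decoder_model_name_or_path gpt2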
if __name__ == "__main__":
    main()
| 721 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file # noqa: F401
from .default import default_command_parser
from .update import update_command_parser
def get_config_parser(subparsers=None):
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)
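

# Illustrative note (not part of the original module): programmatic use mirrors
# the `accelerate config` command line, e.g.
#
#     parser = get_config_parser()
#     args = parser.parse_args(["default"])  # as in `accelerate config default`
#     args.func(args)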
if __name__ == "__main__":
    main()
| 278 | 0 |
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("num_inference_steps", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "solver_order": 2,
            "prediction_type": "epsilon",
            "thresholding": False,
            "sample_max_value": 1.0,
            "algorithm_type": "dpmsolver++",
            "solver_type": "midpoint",
            "lambda_min_clipped": -float("inf"),
            "variance_type": None,
        }

        config.update(**kwargs)
        return config
    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output, new_output = sample, sample
            for t in range(time_step, time_step + scheduler.config.solver_order + 1):
                output = scheduler.step(output, t, sample, **kwargs).prev_sample
                new_output = new_scheduler.step(new_output, t, sample, **kwargs).prev_sample

                assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
def lowercase__ (self : Tuple ) -> Tuple:
"""simple docstring"""
pass
    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.10]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.model_outputs = dummy_past_residuals[: scheduler.config.solver_order]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.model_outputs = dummy_past_residuals[: new_scheduler.config.solver_order]

            output = scheduler.step(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"
    def full_loop(self, scheduler=None, **config):
        if scheduler is None:
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.timesteps):
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample).prev_sample

        return sample
def lowercase__ (self : Any ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
SCREAMING_SNAKE_CASE : Dict = 50
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_model()
SCREAMING_SNAKE_CASE : List[str] = self.dummy_sample_deter
scheduler.set_timesteps(lowerCamelCase__ )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
SCREAMING_SNAKE_CASE : Optional[Any] = model(lowerCamelCase__, lowerCamelCase__ )
SCREAMING_SNAKE_CASE : Any = scheduler.step(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ).prev_sample
SCREAMING_SNAKE_CASE : Optional[Any] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.2574 ) < 1e-3
def lowercase__ (self : List[Any] ) -> int:
"""simple docstring"""
for timesteps in [25, 50, 100, 999, 1000]:
self.check_over_configs(num_train_timesteps=lowerCamelCase__ )
def lowercase__ (self : Tuple ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[Any] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
SCREAMING_SNAKE_CASE : Optional[Any] = self.full_loop(scheduler=lowerCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
SCREAMING_SNAKE_CASE : int = DEISMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE : Any = DPMSolverMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE : List[Any] = UniPCMultistepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE : Optional[int] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
SCREAMING_SNAKE_CASE : Any = self.full_loop(scheduler=lowerCamelCase__ )
SCREAMING_SNAKE_CASE : int = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
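        # Note: the from_config() hops above pass one scheduler config through
        # DEIS, DPM-multistep and UniPC before returning to DPM-singlestep;
        # these schedulers share a config schema, which is what makes the
        # round-trip legal and the final loss statistics reproducible.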
def lowercase__ (self : Any ) -> Dict:
"""simple docstring"""
self.check_over_configs(thresholding=lowerCamelCase__ )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=lowerCamelCase__, prediction_type=lowerCamelCase__, sample_max_value=lowerCamelCase__, algorithm_type='''dpmsolver++''', solver_order=lowerCamelCase__, solver_type=lowerCamelCase__, )
def lowercase__ (self : Tuple ) -> Optional[Any]:
"""simple docstring"""
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowerCamelCase__ )
def lowercase__ (self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=lowerCamelCase__, solver_type=lowerCamelCase__, prediction_type=lowerCamelCase__, algorithm_type=lowerCamelCase__, )
SCREAMING_SNAKE_CASE : str = self.full_loop(
solver_order=lowerCamelCase__, solver_type=lowerCamelCase__, prediction_type=lowerCamelCase__, algorithm_type=lowerCamelCase__, )
assert not torch.isnan(lowerCamelCase__ ).any(), "Samples have nan numbers"
def lowercase__ (self : Optional[int] ) -> Dict:
"""simple docstring"""
self.check_over_configs(lower_order_final=lowerCamelCase__ )
self.check_over_configs(lower_order_final=lowerCamelCase__ )
def lowercase__ (self : str ) -> List[Any]:
"""simple docstring"""
self.check_over_configs(lambda_min_clipped=-float('''inf''' ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def lowercase__ (self : Tuple ) -> Optional[int]:
"""simple docstring"""
self.check_over_configs(variance_type=lowerCamelCase__ )
self.check_over_configs(variance_type='''learned_range''' )
def lowercase__ (self : Union[str, Any] ) -> int:
"""simple docstring"""
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1000]:
self.check_over_forward(num_inference_steps=lowerCamelCase__, time_step=0 )
def lowercase__ (self : int ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = self.full_loop()
SCREAMING_SNAKE_CASE : List[Any] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.2791 ) < 1e-3
def lowercase__ (self : int ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = self.full_loop(use_karras_sigmas=lowerCamelCase__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.2248 ) < 1e-3
def lowercase__ (self : str ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Union[str, Any] = self.full_loop(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE : List[Any] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.1453 ) < 1e-3
def lowercase__ (self : List[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = self.full_loop(prediction_type='''v_prediction''', use_karras_sigmas=lowerCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[int] = torch.mean(torch.abs(lowerCamelCase__ ) )
assert abs(result_mean.item() - 0.0649 ) < 1e-3
def lowercase__ (self : List[str] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE : Tuple = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE : List[str] = self.get_scheduler_config(thresholding=lowerCamelCase__, dynamic_thresholding_ratio=0 )
SCREAMING_SNAKE_CASE : Tuple = scheduler_class(**lowerCamelCase__ )
SCREAMING_SNAKE_CASE : Any = 10
SCREAMING_SNAKE_CASE : Union[str, Any] = self.dummy_model()
SCREAMING_SNAKE_CASE : Optional[int] = self.dummy_sample_deter.half()
scheduler.set_timesteps(lowerCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
SCREAMING_SNAKE_CASE : Dict = model(lowerCamelCase__, lowerCamelCase__ )
SCREAMING_SNAKE_CASE : List[Any] = scheduler.step(lowerCamelCase__, lowerCamelCase__, lowerCamelCase__ ).prev_sample
        assert sample.dtype == torch.float16
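        # (Half-precision smoke test: `dummy_sample_deter.half()` puts the input
        # in fp16, and every scheduler.step() call above must keep it there.)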
| 507 | import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _check_sql_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def snake_case__ ( lowercase , lowercase , lowercase , lowercase ):
lowerCAmelCase_: Any = tmp_path / "cache"
lowerCAmelCase_: int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
lowerCAmelCase_: Optional[int] = SqlDatasetReader(
"dataset" , "sqlite:///" + sqlite_path , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_sql_dataset(lowercase , lowercase )
@require_sqlalchemy
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def snake_case__ ( lowercase , lowercase , lowercase , lowercase ):
lowerCAmelCase_: List[str] = tmp_path / "cache"
lowerCAmelCase_: int = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
lowerCAmelCase_: List[str] = features.copy() if features else default_expected_features
lowerCAmelCase_: Any = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
lowerCAmelCase_: int = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , features=lowercase , cache_dir=lowercase ).read()
_check_sql_dataset(lowercase , lowercase )
def iter_sql_file(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        cur = con.cursor()
        cur.execute("SELECT * FROM dataset")
        for row in cur:
            yield row
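# Read-path sketch exercised by the round-trip tests below (assumes the SQLite
# file already holds a table named "dataset"):
#     ds = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
#     SqlDatasetWriter(ds, "dataset", "sqlite:///" + output_path, num_proc=1).write()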
@require_sqlalchemy
def snake_case__ ( lowercase , lowercase , lowercase ):
lowerCAmelCase_: Optional[int] = tmp_path / "cache"
lowerCAmelCase_: Optional[Any] = os.path.join(lowercase , "tmp.sql" )
lowerCAmelCase_: str = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=lowercase ).read()
SqlDatasetWriter(lowercase , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=1 ).write()
lowerCAmelCase_: Union[str, Any] = iter_sql_file(lowercase )
lowerCAmelCase_: str = iter_sql_file(lowercase )
for rowa, rowa in zip(lowercase , lowercase ):
assert rowa == rowa
@require_sqlalchemy
def snake_case__ ( lowercase , lowercase , lowercase ):
lowerCAmelCase_: str = tmp_path / "cache"
lowerCAmelCase_: Optional[int] = os.path.join(lowercase , "tmp.sql" )
lowerCAmelCase_: Optional[Any] = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=lowercase ).read()
SqlDatasetWriter(lowercase , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=2 ).write()
lowerCAmelCase_: Optional[Any] = iter_sql_file(lowercase )
lowerCAmelCase_: Optional[int] = iter_sql_file(lowercase )
for rowa, rowa in zip(lowercase , lowercase ):
assert rowa == rowa
@require_sqlalchemy
def snake_case__ ( lowercase , lowercase , lowercase ):
lowerCAmelCase_: Union[str, Any] = tmp_path / "cache"
lowerCAmelCase_: int = os.path.join(lowercase , "tmp.sql" )
lowerCAmelCase_: Any = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , cache_dir=lowercase ).read()
with pytest.raises(lowercase ):
SqlDatasetWriter(lowercase , "dataset" , "sqlite:///" + output_sqlite_path , num_proc=0 ).write() | 613 | 0 |
def solution(n: int = 600851475143) -> int:
    """Returns the largest prime factor of n by trial division (Project Euler problem 3)."""
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    i = 2
    ans = 0
    if n == 2:
        return 2
    while n > 2:
        while n % i != 0:
            i += 1
        ans = i
        while n % i == 0:
            n = n // i
        i += 1
    return int(ans)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 684 |
lowerCamelCase : Union[str, Any] = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowerCamelCase : Tuple = [{'type': 'code', 'content': INSTALL_CONTENT}]
lowerCamelCase : Union[str, Any] = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 684 | 1 |
"""simple docstring"""
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    """Transmitted intensity through a polarizer, per Malus's law."""
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of the allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
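# Worked example (hand-checkable): cos(60 deg) = 0.5, so a 100-unit beam through
# a polarizer at 60 degrees transmits 100 * 0.5**2 = 25.0 units:
#     malus_law(100.0, 60)  # -> 25.0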
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law") | 505 | from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    # pairs an AutoImageProcessor with an AutoTokenizer behind one __call__
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 85 | 0 |
"""simple docstring"""
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: '''Wednesday''',
4: '''Thursday''',
5: '''Friday''',
6: '''Saturday''',
}
def get_week_day(year: int, month: int, day: int) -> str:
    """Returns the week-day name for a given Gregorian date via the Doomsday rule."""
    # minimal input checks:
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm:
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        # a century year is a leap year only when divisible by 400
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]
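# Hand check (Gregorian calendar): 24 Oct 2020 fell on a Saturday:
#     get_week_day(2020, 10, 24)  # -> 'Saturday'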
if __name__ == "__main__":
import doctest
doctest.testmod()
| 274 |
"""simple docstring"""
import argparse
import gc
import json
import os
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
SCREAMING_SNAKE_CASE_ : int = 16
SCREAMING_SNAKE_CASE_ : int = 32
def bamb(x):
    # bytes -> MiB
    return int(x / 2**20)


class TorchTracemalloc:
    def __enter__(self):
        gc.collect()
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()  # reset the peak gauge to zero
        self.begin = torch.cuda.memory_allocated()
        return self

    def __exit__(self, *exc):
        gc.collect()
        torch.cuda.empty_cache()
        self.end = torch.cuda.memory_allocated()
        self.peak = torch.cuda.max_memory_allocated()
        self.used = bamb(self.end - self.begin)
        self.peaked = bamb(self.peak - self.begin)
        # print(f"delta used/peak {self.used:4d}/{self.peaked:4d}")
def UpperCAmelCase__ ( A__ , A__ = 16 , A__ = "bert-base-cased" , A__ = 320 , A__ = 160 , ) -> Dict:
"""simple docstring"""
lowerCamelCase__ = AutoTokenizer.from_pretrained(A__ )
lowerCamelCase__ = load_dataset(
"glue" , "mrpc" , split={"train": f'train[:{n_train}]', "validation": f'validation[:{n_val}]'} )
def tokenize_function(A__ ):
# max_length=None => use the model max length (it's actually the default)
lowerCamelCase__ = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=A__ , max_length=A__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCamelCase__ = datasets.map(
A__ , batched=A__ , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=A__ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCamelCase__ = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(A__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(A__ , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(A__ , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
lowerCamelCase__ = DataLoader(
tokenized_datasets["train"] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
lowerCamelCase__ = DataLoader(
tokenized_datasets["validation"] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
return train_dataloader, eval_dataloader
def training_function(config, args):
"""simple docstring"""
# Initialize accelerator
lowerCamelCase__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCamelCase__ = config["lr"]
lowerCamelCase__ = int(config["num_epochs"] )
lowerCamelCase__ = int(config["seed"] )
lowerCamelCase__ = int(config["batch_size"] )
lowerCamelCase__ = args.model_name_or_path
set_seed(A__ )
lowerCamelCase__ , lowerCamelCase__ = get_dataloaders(A__ , A__ , A__ , args.n_train , args.n_val )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCamelCase__ = AutoModelForSequenceClassification.from_pretrained(A__ , return_dict=A__ )
# Instantiate optimizer
lowerCamelCase__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCamelCase__ = optimizer_cls(params=model.parameters() , lr=A__ )
if accelerator.state.deepspeed_plugin is not None:
lowerCamelCase__ = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
lowerCamelCase__ = 1
lowerCamelCase__ = (len(A__ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCamelCase__ = get_linear_schedule_with_warmup(
optimizer=A__ , num_warmup_steps=0 , num_training_steps=A__ , )
else:
lowerCamelCase__ = DummyScheduler(A__ , total_num_steps=A__ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# We need to keep track of how many total steps we have iterated over
lowerCamelCase__ = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCamelCase__ = 0
# Now we train the model
lowerCamelCase__ = {}
for epoch in range(A__ , A__ ):
with TorchTracemalloc() as tracemalloc:
model.train()
for step, batch in enumerate(A__ ):
lowerCamelCase__ = model(**A__ )
lowerCamelCase__ = outputs.loss
lowerCamelCase__ = loss / gradient_accumulation_steps
accelerator.backward(A__ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
# Printing the GPU memory usage details such as allocated memory, peak memory, and total memory usage
accelerator.print("Memory before entering the train : {}".format(bamb(tracemalloc.begin ) ) )
accelerator.print("Memory consumed at the end of the train (end-begin): {}".format(tracemalloc.used ) )
accelerator.print("Peak Memory consumed during the train (max-begin): {}".format(tracemalloc.peaked ) )
accelerator.print(
"Total Peak Memory consumed during the train (max): {}".format(
tracemalloc.peaked + bamb(tracemalloc.begin ) ) )
lowerCamelCase__ = tracemalloc.peaked + bamb(tracemalloc.begin )
if args.peak_memory_upper_bound is not None:
assert (
train_total_peak_memory[f'epoch-{epoch}'] <= args.peak_memory_upper_bound
), "Peak memory usage exceeded the upper bound"
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "peak_memory_utilization.json" ) , "w" ) as f:
json.dump(A__ , A__ )
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
parser.add_argument(
"--model_name_or_path" , type=A__ , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=A__ , )
parser.add_argument(
"--output_dir" , type=A__ , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--peak_memory_upper_bound" , type=A__ , default=A__ , help="The upper bound of peak memory usage in MB. If set, the training will throw an error if the peak memory usage exceeds this value." , )
parser.add_argument(
"--n_train" , type=A__ , default=320 , help="Number of training examples to use." , )
parser.add_argument(
"--n_val" , type=A__ , default=160 , help="Number of validation examples to use." , )
parser.add_argument(
"--num_epochs" , type=A__ , default=1 , help="Number of train epochs." , )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 274 | 1 |
"""simple docstring"""
from __future__ import annotations
from decimal import Decimal
from math import * # noqa: F403
from sympy import diff
def newton_raphson(func: str, a: float | Decimal, precision: float = 10**-10) -> float:
    """Finds a root of func (an expression in x) by Newton's method, starting from point a."""
    x = a
    while True:
        x = Decimal(x) - (
            Decimal(eval(func)) / Decimal(eval(str(diff(func))))  # noqa: S307
        )
        # This number dictates the accuracy of the answer
        if abs(eval(func)) < precision:  # noqa: S307
            return float(x)
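# The loop implements the Newton-Raphson update x_{n+1} = x_n - f(x_n) / f'(x_n)
# and stops once |f(x_n)| < precision. One hand iteration for f(x) = x**2 - 2
# from x0 = 1.5: x1 = 1.5 - 0.25 / 3.0 = 1.41666..., already close to sqrt(2).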
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
print(F"""The root of x**2 - 5*x + 2 = 0 is {newton_raphson('x**2 - 5*x + 2', 0.4)}""")
    # Find the root of log(x) - 1 = 0 (i.e. Euler's number e)
print(F"""The root of log(x) - 1 = 0 is {newton_raphson('log(x) - 1', 2)}""")
# Exponential Roots
print(F"""The root of exp(x) - 1 = 0 is {newton_raphson('exp(x) - 1', 0)}""") | 91 |
def remove_duplicates(key: str) -> str:
    key_no_dups = ""
    for ch in key:
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())
def main() -> None:
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 121 | 0 |
"""simple docstring"""
import math
def _SCREAMING_SNAKE_CASE ( __snake_case : int ):
'''simple docstring'''
assert isinstance(__snake_case , __snake_case ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
lowercase = range(3 , int(math.sqrt(__snake_case ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def _SCREAMING_SNAKE_CASE ( __snake_case : List[str] , __snake_case : int=1 , **__snake_case : Optional[int] ):
'''simple docstring'''
lowercase = factor * value
lowercase = value
while not is_prime(__snake_case ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **__snake_case )
return value
| 134 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_UpperCamelCase : Tuple = logging.get_logger(__name__)
_UpperCamelCase : Dict = torch.device('cpu')
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url, stream=True).raw)
    return im
def get_expected_output(swiftformer_name):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_703e00, 2.1_107e00, -2.0_811e00, 8.8_685e-01, 2.4_360e-01] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_636e-01, 2.3_478e-01, -1.6_963e00, -1.7_381e00, -8.6_337e-01] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_768e-01, -4.7_429e-01, -1.0_897e00, -1.0_248e00, 3.5_523e-02] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_330e-01, 2.4_211e-01, -6.0_185e-01, -8.2_789e-01, -6.0_446e-02] )
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace('.pwconv', '.point_wise_conv')
        if ".dwconv" in k:
            k_new = k_new.replace('.dwconv', '.depth_wise_conv')
        if ".Proj." in k:
            k_new = k_new.replace('.Proj.', '.proj.')
        if "patch_embed" in k_new:
            k_new = k_new.replace('patch_embed', 'swiftformer.patch_embed.patch_embedding')
        if "network" in k_new:
            ls = k_new.split('.')
            if ls[2].isdigit():
                k_new = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:])
            else:
                k_new = k_new.replace('network', 'swiftformer.encoder.network')
        rename_keys.append((k, k_new))
    return rename_keys
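# e.g. create_rename_keys maps 'network.0.1.pwconv1.weight' to
# 'swiftformer.encoder.network.0.blocks.1.point_wise_conv1.weight'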
@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
lowercase = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
lowercase = 10_00
lowercase = 'huggingface/label-files'
lowercase = 'imagenet-1k-id2label.json'
lowercase = json.load(open(hf_hub_download(__snake_case , __snake_case , repo_type='dataset' ) , 'r' ) )
lowercase = {int(__snake_case ): v for k, v in idalabel.items()}
lowercase = idalabel
lowercase = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
lowercase = [3, 3, 6, 4]
lowercase = [48, 56, 1_12, 2_20]
elif swiftformer_name == "swiftformer_s":
lowercase = [3, 3, 9, 6]
lowercase = [48, 64, 1_68, 2_24]
elif swiftformer_name == "swiftformer_l1":
lowercase = [4, 3, 10, 5]
lowercase = [48, 96, 1_92, 3_84]
elif swiftformer_name == "swiftformer_l3":
lowercase = [4, 4, 12, 6]
lowercase = [64, 1_28, 3_20, 5_12]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('https' ):
lowercase = torch.hub.load_state_dict_from_url(__snake_case , map_location='cpu' , check_hash=__snake_case )
else:
lowercase = torch.load(__snake_case , map_location='cpu' )
lowercase = checkpoint
lowercase = create_rename_keys(__snake_case )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__snake_case , __snake_case , __snake_case )
# load HuggingFace model
lowercase = SwiftFormerForImageClassification(__snake_case ).eval()
hf_model.load_state_dict(__snake_case )
# prepare test inputs
lowercase = prepare_img()
lowercase = ViTImageProcessor.from_pretrained('preprocessor_config' )
lowercase = processor(images=__snake_case , return_tensors='pt' )
# compare outputs from both models
lowercase = get_expected_output(__snake_case )
lowercase = hf_model(inputs['pixel_values'] ).logits
assert hf_logits.shape == torch.Size([1, 10_00] )
assert torch.allclose(hf_logits[0, 0:5] , __snake_case , atol=1e-3 )
Path(__snake_case ).mkdir(exist_ok=__snake_case )
print(f'Saving model {swiftformer_name} to {pytorch_dump_folder_path}' )
hf_model.save_pretrained(__snake_case )
if __name__ == "__main__":
_UpperCamelCase : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--swiftformer_name',
default='swiftformer_xs',
choices=['swiftformer_xs', 'swiftformer_s', 'swiftformer_l1', 'swiftformer_l3'],
type=str,
help='Name of the SwiftFormer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='./converted_outputs/',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--original_ckpt', default=None, type=str, help='Path to the original model checkpoint.')
_UpperCamelCase : Union[str, Any] = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
| 134 | 1 |
"""simple docstring"""
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Optional[int] , _snake_case : List[str] , _snake_case : Optional[Any]=13 , _snake_case : Tuple=10 , _snake_case : str=3 , _snake_case : Dict=2 , _snake_case : int=2 , _snake_case : Union[str, Any]=True , _snake_case : int=True , _snake_case : Optional[int]=32 , _snake_case : Optional[Any]=5 , _snake_case : int=4 , _snake_case : Any=37 , _snake_case : Optional[Any]="gelu" , _snake_case : Dict=0.1 , _snake_case : Dict=0.1 , _snake_case : str=10 , _snake_case : Union[str, Any]=0.02 , _snake_case : Dict="divided_space_time" , _snake_case : Tuple=None , ) -> Tuple:
'''simple docstring'''
a__ = parent
a__ = batch_size
a__ = image_size
a__ = num_channels
a__ = patch_size
a__ = num_frames
a__ = is_training
a__ = use_labels
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = attention_type
a__ = initializer_range
a__ = scope
a__ = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
a__ = (image_size // patch_size) ** 2
a__ = (num_frames) * self.num_patches_per_frame + 1
def _lowerCAmelCase ( self : Tuple ) -> str:
'''simple docstring'''
a__ = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
a__ = None
if self.use_labels:
a__ = ids_tensor([self.batch_size] , self.num_labels )
a__ = self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase ( self : Any ) -> Tuple:
'''simple docstring'''
a__ = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
a__ = self.num_labels
return config
def _lowerCAmelCase ( self : Union[str, Any] , _snake_case : Dict , _snake_case : int , _snake_case : Tuple ) -> Optional[Any]:
'''simple docstring'''
a__ = TimesformerModel(config=_snake_case )
model.to(_snake_case )
model.eval()
a__ = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCAmelCase ( self : Dict , _snake_case : Any , _snake_case : List[Any] , _snake_case : int ) -> Any:
'''simple docstring'''
a__ = TimesformerForVideoClassification(_snake_case )
model.to(_snake_case )
model.eval()
a__ = model(_snake_case )
# verify the logits shape
a__ = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , _snake_case )
def _lowerCAmelCase ( self : Optional[int] ) -> str:
'''simple docstring'''
a__ = self.prepare_config_and_inputs()
a__ , a__ , a__ = config_and_inputs
a__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE ( a , a , unittest.TestCase ):
"""simple docstring"""
a_ : Optional[Any] =(TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
a_ : str =(
{"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
a_ : Any =False
a_ : Dict =False
a_ : Dict =False
a_ : Union[str, Any] =False
def _lowerCAmelCase ( self : Dict ) -> List[str]:
'''simple docstring'''
a__ = TimesformerModelTester(self )
a__ = ConfigTester(
self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 )
def _lowerCAmelCase ( self : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : Optional[Any] , _snake_case : List[Any]=False ) -> Optional[Any]:
'''simple docstring'''
a__ = copy.deepcopy(_snake_case )
if return_labels:
if model_class in get_values(_snake_case ):
a__ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_snake_case )
return inputs_dict
def _lowerCAmelCase ( self : List[str] ) -> Tuple:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason='TimeSformer does not use inputs_embeds' )
def _lowerCAmelCase ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
def _lowerCAmelCase ( self : List[str] ) -> Dict:
'''simple docstring'''
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case , nn.Linear ) )
def _lowerCAmelCase ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ = model_class(_snake_case )
a__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a__ = [*signature.parameters.keys()]
a__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _snake_case )
def _lowerCAmelCase ( self : List[Any] ) -> Any:
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
def _lowerCAmelCase ( self : str ) -> int:
'''simple docstring'''
a__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*_snake_case )
@slow
def _lowerCAmelCase ( self : int ) -> Dict:
'''simple docstring'''
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a__ = TimesformerModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def _lowerCAmelCase ( self : List[str] ) -> Any:
'''simple docstring'''
if not self.has_attentions:
pass
else:
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
a__ = True
for model_class in self.all_model_classes:
a__ = self.model_tester.seq_length
a__ = self.model_tester.num_frames
a__ = True
a__ = False
a__ = True
a__ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
a__ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
a__ = outputs.attentions
self.assertEqual(len(_snake_case ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
a__ = True
a__ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
a__ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
a__ = outputs.attentions
self.assertEqual(len(_snake_case ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
a__ = len(_snake_case )
# Check attention is always last and order is fine
a__ = True
a__ = True
a__ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
a__ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
self.assertEqual(out_len + 1 , len(_snake_case ) )
a__ = outputs.attentions
self.assertEqual(len(_snake_case ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def _lowerCAmelCase ( self : str ) -> Tuple:
'''simple docstring'''
def check_hidden_states_output(_snake_case : str , _snake_case : Any , _snake_case : Optional[Any] ):
a__ = model_class(_snake_case )
model.to(_snake_case )
model.eval()
with torch.no_grad():
a__ = model(**self._prepare_for_class(_snake_case , _snake_case ) )
a__ = outputs.hidden_states
a__ = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(_snake_case ) , _snake_case )
a__ = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
a__ , a__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a__ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
a__ = True
check_hidden_states_output(_snake_case , _snake_case , _snake_case )
def prepare_video():
    file = hf_hub_download(
        repo_id='hf-internal-testing/spaghetti-video', filename='eating_spaghetti.npy', repo_type='dataset'
    )
    video = np.load(file)
    return list(video)
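# prepare_video() yields a list of per-frame numpy arrays; the integration test
# below feeds the first 8 frames (video[:8]) through the image processor.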
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowerCAmelCase ( self : Dict ) -> Tuple:
'''simple docstring'''
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _lowerCAmelCase ( self : Tuple ) -> Dict:
'''simple docstring'''
a__ = TimesformerForVideoClassification.from_pretrained('facebook/timesformer-base-finetuned-k400' ).to(
_snake_case )
a__ = self.default_image_processor
a__ = prepare_video()
a__ = image_processor(video[:8] , return_tensors='pt' ).to(_snake_case )
# forward pass
with torch.no_grad():
a__ = model(**_snake_case )
# verify the logits
a__ = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , _snake_case )
a__ = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1E-4 ) )
| 232 | """simple docstring"""
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f'''{torch_layer} layer.weight does not match'''
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f'''{torch_layer} layer.bias does not match'''
        torch_layer.bias = nn.Parameter(bias)
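# set_param is the single choke point through which every Trax array enters the
# PyTorch module tree; the shape asserts catch transposition mistakes up front.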
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
a__ = np.asarray(weights[0] )
a__ = np.asarray(weights[1] )
a__ = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key,torch.tensor(UpperCAmelCase__ ).transpose(1,2 ).contiguous().view(-1,UpperCAmelCase__ ),)
set_param(
torch_layer.self_attention.value,torch.tensor(UpperCAmelCase__ ).transpose(1,2 ).contiguous().view(-1,UpperCAmelCase__ ),)
set_param(
torch_layer.output.dense,torch.tensor(UpperCAmelCase__ ).view(-1,UpperCAmelCase__ ).contiguous().transpose(0,1 ),)
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
a__ = np.asarray(weights[0] )
a__ = np.asarray(weights[1] )
a__ = np.asarray(weights[2] )
a__ = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query,torch.tensor(UpperCAmelCase__ ).transpose(1,2 ).contiguous().view(-1,UpperCAmelCase__ ),)
set_param(
torch_layer.self_attention.key,torch.tensor(UpperCAmelCase__ ).transpose(1,2 ).contiguous().view(-1,UpperCAmelCase__ ),)
set_param(
torch_layer.self_attention.value,torch.tensor(UpperCAmelCase__ ).transpose(1,2 ).contiguous().view(-1,UpperCAmelCase__ ),)
set_param(
torch_layer.output.dense,torch.tensor(UpperCAmelCase__ ).view(-1,UpperCAmelCase__ ).contiguous().transpose(0,1 ),)
def set_block_weights_in_torch(weights, torch_block, hidden_size):
a__ = weights[0][0][0]
a__ = np.asarray(layer_norm_a[0] )
a__ = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm,torch.tensor(UpperCAmelCase__ ),torch.tensor(UpperCAmelCase__ ),)
# lsh weights + output
a__ = weights[0][1]
if len(UpperCAmelCase__ ) < 4:
set_layer_weights_in_torch_lsh(UpperCAmelCase__,torch_block.attention,UpperCAmelCase__ )
else:
set_layer_weights_in_torch_local(UpperCAmelCase__,torch_block.attention,UpperCAmelCase__ )
# intermediate weighs
a__ = weights[2][0][1][2]
# Chunked Feed Forward
if len(UpperCAmelCase__ ) == 4:
a__ = intermediate_weights[2]
# layernorm 2
a__ = np.asarray(intermediate_weights[0][0] )
a__ = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm,torch.tensor(UpperCAmelCase__ ),torch.tensor(UpperCAmelCase__ ),)
# intermediate dense
a__ = np.asarray(intermediate_weights[1][0] )
a__ = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense,torch.tensor(UpperCAmelCase__ ).transpose(0,1 ).contiguous(),torch.tensor(UpperCAmelCase__ ),)
# intermediate out
a__ = np.asarray(intermediate_weights[4][0] )
a__ = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense,torch.tensor(UpperCAmelCase__ ).transpose(0,1 ).contiguous(),torch.tensor(UpperCAmelCase__ ),)
def set_model_weights_in_torch(weights, torch_model, hidden_size):
a__ = torch_model.reformer
# word embeds
a__ = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings,torch.tensor(UpperCAmelCase__ ),)
if isinstance(weights[3],UpperCAmelCase__ ):
a__ = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
a__ = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f'''{position_embeddings[emb_idx]} emb does not match'''
a__ = nn.Parameter(torch.tensor(UpperCAmelCase__ ) )
a__ = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
UpperCAmelCase__ ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
a__ = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(UpperCAmelCase__,UpperCAmelCase__,UpperCAmelCase__ )
# output layer norm
a__ = np.asarray(weights[7][0] )
a__ = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm,torch.tensor(UpperCAmelCase__ ),torch.tensor(UpperCAmelCase__ ),)
# output embeddings
a__ = np.asarray(weights[9][0] )
a__ = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder,torch.tensor(UpperCAmelCase__ ).transpose(0,1 ).contiguous(),torch.tensor(UpperCAmelCase__ ),)
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
a__ = ReformerConfig.from_json_file(UpperCAmelCase__ )
print(f'''Building PyTorch model from configuration: {config}''' )
a__ = ReformerModelWithLMHead(UpperCAmelCase__ )
with open(UpperCAmelCase__,'rb' ) as f:
a__ = pickle.load(UpperCAmelCase__ )['weights']
set_model_weights_in_torch(UpperCAmelCase__,UpperCAmelCase__,config.hidden_size )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict(),UpperCAmelCase__ )
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained Reformer model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__magic_name__ = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
| 232 | 1 |
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
UpperCAmelCase__ : Optional[Any] = logging.get_logger(__name__)
UpperCAmelCase__ : Any = {
'''EleutherAI/gpt-neo-1.3B''': '''https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json''',
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = 'gpt_neo'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self , lowercase_=5_0_2_5_7 , lowercase_=2_0_4_8 , lowercase_=2_0_4_8 , lowercase_=2_4 , lowercase_=[[["global", "local"], 1_2]] , lowercase_=1_6 , lowercase_=None , lowercase_=2_5_6 , lowercase_="gelu_new" , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.0 , lowercase_=0.1 , lowercase_=1e-5 , lowercase_=0.02 , lowercase_=True , lowercase_=5_0_2_5_6 , lowercase_=5_0_2_5_6 , **lowercase_ , ) -> Optional[int]:
__snake_case = vocab_size
__snake_case = max_position_embeddings
__snake_case = hidden_size
__snake_case = num_layers
__snake_case = num_heads
__snake_case = intermediate_size
__snake_case = window_size
__snake_case = activation_function
__snake_case = resid_dropout
__snake_case = embed_dropout
__snake_case = attention_dropout
__snake_case = classifier_dropout
__snake_case = layer_norm_epsilon
__snake_case = initializer_range
__snake_case = use_cache
__snake_case = bos_token_id
__snake_case = eos_token_id
__snake_case = attention_types
__snake_case = self.expand_attention_types_params(_A)
if len(self.attention_layers) != self.num_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.attention_layers)` == `config.num_layers` '
F"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
F"`config.num_layers = {self.num_layers}`. "
'`config.attention_layers` is prepared using `config.attention_types`. '
'Please verify the value of `config.attention_types` argument.')
super().__init__(bos_token_id=_A , eos_token_id=_A , **_A)
@staticmethod
def _a ( lowercase_) -> List[str]:
__snake_case = []
for item in attention_types:
for _ in range(item[1]):
attentions.extend(item[0])
return attentions
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]
    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode='floor') + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]
    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]
    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))
    return sliced.permute(perm)
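# e.g. unfolding a 1-D tensor of length 8 with size=4, step=4 gathers two
# windows (indices 0..3 and 4..7); permute() then moves the in-window axis to
# the last dimension, matching torch.Tensor.unfold's layout.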
def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Returns the largest divisor of seq_length smaller than window_size, plus the block count."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode='floor')
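# e.g. seq_length=12, window_size=5 scans candidate divisors 1..4, keeps those
# that divide 12 exactly, and returns (tensor(4), tensor(3)): block length 4, 3 blocks.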
class GPTNeoOnnxConfig(OnnxConfigWithPast):
@property
def _a ( self) -> int:
__snake_case = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}})
if self.use_past:
self.fill_with_past_key_values_(_A , direction='inputs')
__snake_case = {0: 'batch', 1: 'past_sequence + sequence'}
else:
__snake_case = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def _a ( self) -> Dict:
return self._config.num_heads
def _a ( self , lowercase_ , lowercase_ = -1 , lowercase_ = -1 , lowercase_ = False , lowercase_ = None , ) -> int:
__snake_case = super(_A , self).generate_dummy_inputs(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A)
# We need to order the input in the way they appears in the forward()
__snake_case = OrderedDict({'input_ids': common_inputs['input_ids']})
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.')
else:
import torch
__snake_case = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__snake_case = seqlen + 2
__snake_case = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__snake_case = [
(torch.zeros(_A), torch.zeros(_A)) for _ in range(self.num_layers)
]
__snake_case = common_inputs['attention_mask']
if self.use_past:
__snake_case = ordered_inputs['attention_mask'].dtype
__snake_case = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(_A , _A , dtype=_A)] , dim=1)
return ordered_inputs
@property
def _a ( self) -> Tuple:
return 1_3
| 711 |
import numpy as np
def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Element-wise logistic sigmoid."""
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    """SiLU (swish) activation: x * sigmoid(x)."""
    return vector * sigmoid(vector)
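# Spot check: sigmoid(0) = 0.5, so sigmoid_linear_unit(0) = 0 * 0.5 = 0; for
# large positive x the unit approaches the identity because sigmoid(x) -> 1.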
if __name__ == "__main__":
import doctest
doctest.testmod()
| 676 | 0 |
lowerCAmelCase__ : List[Any] ='\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowerCAmelCase__ : List[Any] =[{'type': 'code', 'content': INSTALL_CONTENT}]
lowerCAmelCase__ : str ={
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 101 |
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, Transformer2DModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
SCREAMING_SNAKE_CASE__ : Optional[int] = logging.get_logger(__name__) # pylint: disable=invalid-name
class LearnedClassifierFreeSamplingEmbeddings(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(self, learnable: bool, hidden_size: Optional[int] = None, length: Optional[int] = None):
        super().__init__()
        self.learnable = learnable
        if self.learnable:
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None
        self.embeddings = torch.nn.Parameter(embeddings)
class VQDiffusionPipeline(DiffusionPipeline):
    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: Transformer2DModel
    learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae: VQModel,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        transformer: Transformer2DModel,
        scheduler: VQDiffusionScheduler,
        learned_classifier_free_sampling_embeddings: LearnedClassifierFreeSamplingEmbeddings,
    ):
        super().__init__()
        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )
    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt, padding="max_length", max_length=self.tokenizer.model_max_length, return_tensors="pt"
        )
        text_input_ids = text_inputs.input_ids
        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]
        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)
        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size
                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt"
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)
                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
        return prompt_embeds
@torch.no_grad()
def __call__( self : int , SCREAMING_SNAKE_CASE__ : Union[str, List[str]] , SCREAMING_SNAKE_CASE__ : int = 1_00 , SCREAMING_SNAKE_CASE__ : float = 5.0 , SCREAMING_SNAKE_CASE__ : float = 1.0 , SCREAMING_SNAKE_CASE__ : int = 1 , SCREAMING_SNAKE_CASE__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None , SCREAMING_SNAKE_CASE__ : Optional[str] = "pil" , SCREAMING_SNAKE_CASE__ : bool = True , SCREAMING_SNAKE_CASE__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , SCREAMING_SNAKE_CASE__ : int = 1 , ) -> Union[ImagePipelineOutput, Tuple]:
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = 1
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase = len(SCREAMING_SNAKE_CASE__ )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(SCREAMING_SNAKE_CASE__ )}''' )
__lowerCamelCase = batch_size * num_images_per_prompt
__lowerCamelCase = guidance_scale > 1.0
__lowerCamelCase = self._encode_prompt(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(SCREAMING_SNAKE_CASE__ )}.''' )
# get the initial completely masked latents unless the user supplied it
__lowerCamelCase = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
__lowerCamelCase = self.transformer.num_vector_embeds - 1
__lowerCamelCase = torch.full(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
'''Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,'''
f''' {self.transformer.num_vector_embeds - 1} (inclusive).''' )
__lowerCamelCase = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(SCREAMING_SNAKE_CASE__ , device=self.device )
__lowerCamelCase = self.scheduler.timesteps.to(self.device )
__lowerCamelCase = latents
for i, t in enumerate(self.progress_bar(SCREAMING_SNAKE_CASE__ ) ):
# expand the sample if we are doing classifier free guidance
__lowerCamelCase = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
__lowerCamelCase = self.transformer(SCREAMING_SNAKE_CASE__ , encoder_hidden_states=SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ ).sample
if do_classifier_free_guidance:
__lowerCamelCase , __lowerCamelCase = model_output.chunk(2 )
__lowerCamelCase = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(SCREAMING_SNAKE_CASE__ , dim=1 , keepdim=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.truncate(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# remove `log(0)`'s (`-inf`s)
__lowerCamelCase = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCamelCase = self.scheduler.step(SCREAMING_SNAKE_CASE__ , timestep=SCREAMING_SNAKE_CASE__ , sample=SCREAMING_SNAKE_CASE__ , generator=SCREAMING_SNAKE_CASE__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.vqvae.config.vq_embed_dim
__lowerCamelCase = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
__lowerCamelCase = self.vqvae.quantize.get_codebook_entry(SCREAMING_SNAKE_CASE__ , shape=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.vqvae.decode(SCREAMING_SNAKE_CASE__ , force_not_quantize=SCREAMING_SNAKE_CASE__ ).sample
__lowerCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
__lowerCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__lowerCamelCase = self.numpy_to_pil(SCREAMING_SNAKE_CASE__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=SCREAMING_SNAKE_CASE__ )
def __A ( self : int , SCREAMING_SNAKE_CASE__ : torch.FloatTensor , SCREAMING_SNAKE_CASE__ : float ) -> torch.FloatTensor:
__lowerCamelCase , __lowerCamelCase = torch.sort(SCREAMING_SNAKE_CASE__ , 1 , descending=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = torch.exp(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
__lowerCamelCase = torch.full_like(keep_mask[:, 0:1, :] , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = torch.cat((all_true, keep_mask) , dim=1 )
__lowerCamelCase = keep_mask[:, :-1, :]
__lowerCamelCase = keep_mask.gather(1 , indices.argsort(1 ) )
__lowerCamelCase = log_p_x_0.clone()
__lowerCamelCase = -torch.inf # -inf = log(0)
return rv
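# Example usage (hedged sketch): the checkpoint id below is illustrative of the
# published VQ-Diffusion weights; adjust it to whatever checkpoint you actually have.
#
#   pipe = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
#   image = pipe("teddy bear playing in the pool", truncation_rate=0.86).images[0]
#   image.save("teddy.png")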
| 298 | 0 |
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    """Return the number of times `term` occurs in `document` (case-insensitive)."""
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str) -> tuple[int, int]:
    """Return (number of documents containing `term`, total number of documents)."""
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    """Return idf = log10(n / df), or the smoothed variant 1 + log10(n / (1 + df))."""
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)

    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: float) -> float:
    """Combine term frequency and inverse document frequency."""
    return round(tf * idf, 3)
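# Worked example (hedged): the numbers follow directly from the definitions
# above, with log10(2 / 1) ~= 0.301.
#   corpus = "the cat sat\nthe dog sat"
#   term_frequency("cat", "the cat sat")   # -> 1
#   document_frequency("cat", corpus)      # -> (1, 2)
#   inverse_document_frequency(1, 2)       # -> 0.301
#   tf_idf(1, 0.301)                       # -> 0.301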
| 704 |
"""simple docstring"""
def lowerCAmelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : str ):
"""simple docstring"""
assert x is not None
assert y is not None
__lowercase = len(UpperCamelCase__ )
__lowercase = len(UpperCamelCase__ )
# declaring the array for storing the dp values
__lowercase = [[0] * (n + 1) for _ in range(m + 1 )] # noqa: E741
for i in range(1 , m + 1 ):
for j in range(1 , n + 1 ):
__lowercase = 1 if x[i - 1] == y[j - 1] else 0
__lowercase = max(l[i - 1][j] , l[i][j - 1] , l[i - 1][j - 1] + match )
__lowercase = """"""
__lowercase , __lowercase = m, n
while i > 0 and j > 0:
__lowercase = 1 if x[i - 1] == y[j - 1] else 0
if l[i][j] == l[i - 1][j - 1] + match:
if match == 1:
__lowercase = x[i - 1] + seq
i -= 1
j -= 1
elif l[i][j] == l[i - 1][j]:
i -= 1
else:
j -= 1
return l[m][n], seq
if __name__ == "__main__":
UpperCAmelCase__ ="AGGTAB"
UpperCAmelCase__ ="GXTXAYB"
UpperCAmelCase__ =4
UpperCAmelCase__ ="GTAB"
UpperCAmelCase__ , UpperCAmelCase__ =longest_common_subsequence(a, b)
print("len =", ln, ", sub-sequence =", subseq)
import doctest
doctest.testmod()
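    # Sanity check (hedged): "GTAB" (length 4) is the classic LCS of "AGGTAB"
    # and "GXTXAYB", so the computed result should match the expected values.
    assert ln == expected_ln
    assert subseq == expected_subseq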
| 442 | 0 |
"""Print the multiplication table of a given number up to a given number of terms."""


def multiplication_table(number: int, number_of_terms: int) -> str:
    """Return the multiplication table for `number`, one line per term."""
    return "\n".join(
        f"{number} * {i} = {number * i}" for i in range(1, number_of_terms + 1)
    )


if __name__ == "__main__":
    print(multiplication_table(number=5, number_of_terms=10))
| 98 |
import argparse
import random

import joblib
import numpy as np
import torch
from igf.igf import (
    SecondaryLearner,
    collect_objective_set,
    compute_perplexity,
    generate_datasets,
    load_gpt2,
    recopy_gpt2,
    set_seed,
    train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler

from transformers import GPT2LMHeadModel


def generate_n_pairs(
    context_len=32,
    max_steps=10,
    size_objective_set=100,
    min_len=1026,
    trim=True,
    data_file="data/tokenized_stories_train_wikitext103.jbl",
    igf_data_file="igf_context_pairs.jbl",
):
    set_seed(3)

    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim
    )

    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)

    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)

    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()


def training_secondary_learner(
    secondary_learner_train_data,
    secondary_learner_max_epochs=15,
    secondary_learner_batch_size=128,
    eval_freq=100,
    igf_model_path="igf_model.pt",
):
    set_seed(42)

    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")

    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)

    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner,
        secondary_learner_train_data,
        max_epochs=secondary_learner_max_epochs,
        batch_size=secondary_learner_batch_size,
        eval_freq=100,
        igf_model_path=igf_model_path,
    )

    del model, secondary_learner_train_data
    torch.cuda.empty_cache()

    return secondary_learner


def finetune(
    model,
    train_dataset,
    test_dataset,
    context_len=32,
    max_steps=1000,
    batch_size=16,
    threshold=1.0,
    recopy_model=recopy_gpt2,
    secondary_learner=None,
    eval_interval=10,
    finetuned_model_name="gpt2_finetuned.pt",
):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)

    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)

    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0

    observed_qs = []
    test_perps = []

    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(real_perp)
    print("Test perplexity, step", global_step, ":", real_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True

            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0)
                )[0].item()
                observed_qs.append(float(predicted_q))

                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False

            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1

            del outputs

            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(real_perp)
                    print("Test perplexity, step", global_step, ":", real_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break

    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model


def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")

    # Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help="The input data dir. Should contain data files for WikiText.",
    )
    parser.add_argument(
        "--model_name_or_path",
        default=None,
        type=str,
        required=True,
        help="Path to pretrained model or model identifier from huggingface.co/models",
    )
    parser.add_argument(
        "--data_file",
        type=str,
        default=None,
        help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ),
    )
    parser.add_argument(
        "--igf_data_file",
        type=str,
        default=None,
        help="A jbl file containing the context and information gain pairs to train secondary learner.",
    )
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help="The output directory where the final fine-tuned model is stored.",
    )
    parser.add_argument(
        "--tokenizer_name",
        default=None,
        type=str,
        help="Pretrained tokenizer name or path if not the same as model_name",
    )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len",
        default=32,
        type=int,
        help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ),
    )
    parser.add_argument(
        "--size_objective_set",
        default=100,
        type=int,
        help="number of articles that are long enough to be used as our objective set",
    )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq"
    )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size",
        default=128,
        type=int,
        help="batch size of training data for secondary learner",
    )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) "
    )
    parser.add_argument(
        "--eval_interval",
        default=10,
        type=int,
        help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ),
    )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data"
    )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set"
    )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner"
    )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold",
        default=1.0,
        type=float,
        help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ),
    )
    parser.add_argument(
        "--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name"
    )
    parser.add_argument(
        "--recopy_model",
        default=recopy_gpt2,
        type=str,
        help="Reset the model to the original pretrained GPT-2 weights after each iteration",
    )

    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32,
        max_steps=10,
        size_objective_set=100,
        min_len=1026,
        trim=True,
        data_file="data/tokenized_stories_train_wikitext103.jbl",
        igf_data_file="igf_context_pairs.jbl",
    )

    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")

    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data,
        secondary_learner_max_epochs=15,
        secondary_learner_batch_size=128,
        eval_freq=100,
        igf_model_path="igf_model.pt",
    )

    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)

    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True
    )

    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model,
        train_dataset,
        test_dataset,
        context_len=32,
        max_steps=1000,
        batch_size=16,
        threshold=1.0,
        recopy_model=recopy_gpt2,
        secondary_learner=secondary_learner,
        eval_interval=10,
        finetuned_model_name="gpt2_finetuned.pt",
    )


if __name__ == "__main__":
    main()
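# Usage note (hedged): main() wires the three stages with hard-coded values even
# though argparse flags are defined above; the script name below is an assumption.
#   python run_clm_igf.py --data_dir data/ --model_name_or_path gpt2 --output_dir ./igf_out
# with the remaining knobs edited directly in main() as needed.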
| 23 | 0 |
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """Greedily pick the largest denominations first to make change for `value`."""
    total_value = int(value)

    # Initialize Result
    answer = []

    # Traverse through all denomination
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array

    return answer
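# Note (hedged): the greedy strategy above is only guaranteed optimal for
# canonical coin systems such as the Indian denominations used below. For a
# non-canonical set like [1, 3, 4], making change for 6 greedily yields
# [4, 1, 1] (three coins) while the optimum is [3, 3] (two coins).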
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())

        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        answer = find_minimum_change(denominations, value)

        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
| 561 |
import unittest

from transformers import SPIECE_UNDERLINE
from transformers.models.speecht5 import SpeechT5Tokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.tokenization_utils import AddedToken

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe_char.model")


@require_sentencepiece
@require_tokenizers
class SpeechT5TokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = SpeechT5Tokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = SpeechT5Tokenizer(SAMPLE_VOCAB)

        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-4], "œ")
        self.assertEqual(vocab_keys[-2], "<mask>")
        self.assertEqual(vocab_keys[-1], "<ctc_blank>")
        self.assertEqual(len(vocab_keys), 81)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 79)
    def test_add_tokens_tokenizer(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                vocab_size = tokenizer.vocab_size
                all_size = len(tokenizer)

                self.assertNotEqual(vocab_size, 0)

                # We usually have added tokens from the start in tests because our vocab fixtures are
                # smaller than the original vocabs - let's not assert this
                # self.assertEqual(vocab_size, all_size)

                new_toks = ["aaaaa bbbbbb", "cccccccccdddddddd"]
                added_toks = tokenizer.add_tokens(new_toks)
                vocab_size_2 = tokenizer.vocab_size
                all_size_2 = len(tokenizer)

                self.assertNotEqual(vocab_size_2, 0)
                self.assertEqual(vocab_size, vocab_size_2)
                self.assertEqual(added_toks, len(new_toks))
                self.assertEqual(all_size_2, all_size + len(new_toks))

                tokens = tokenizer.encode("aaaaa bbbbbb low cccccccccdddddddd l", add_special_tokens=False)

                self.assertGreaterEqual(len(tokens), 4)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)

                new_toks_2 = {"eos_token": ">>>>|||<||<<|<<", "pad_token": "<<<<<|||>|>>>>|>"}
                added_toks_2 = tokenizer.add_special_tokens(new_toks_2)
                vocab_size_3 = tokenizer.vocab_size
                all_size_3 = len(tokenizer)

                self.assertNotEqual(vocab_size_3, 0)
                self.assertEqual(vocab_size, vocab_size_3)
                self.assertEqual(added_toks_2, len(new_toks_2))
                self.assertEqual(all_size_3, all_size_2 + len(new_toks_2))

                tokens = tokenizer.encode(
                    ">>>>|||<||<<|<< aaaaabbbbbb low cccccccccdddddddd <<<<<|||>|>>>>|> l", add_special_tokens=False
                )

                self.assertGreaterEqual(len(tokens), 6)
                self.assertGreater(tokens[0], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[0], tokens[1])
                self.assertGreater(tokens[-3], tokenizer.vocab_size - 1)
                self.assertGreater(tokens[-3], tokens[-4])
                self.assertEqual(tokens[0], tokenizer.eos_token_id)
                self.assertEqual(tokens[-3], tokenizer.pad_token_id)

    def test_pickle_subword_regularization_tokenizer(self):
        pass

    def test_subword_regularization_tokenizer(self):
        pass
    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()

        tokens = tokenizer.tokenize("This is a test")
        # fmt: off
        self.assertListEqual(tokens, [SPIECE_UNDERLINE, 'T', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'a', SPIECE_UNDERLINE, 't', 'e', 's', 't'])
        # fmt: on

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [4, 32, 11, 10, 12, 4, 10, 12, 4, 7, 4, 6, 5, 12, 6]
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '92000', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])

        ids = tokenizer.convert_tokens_to_ids(tokens)
        # fmt: off
        self.assertListEqual(ids, [4, 30, 4, 20, 7, 12, 4, 25, 8, 13, 9, 4, 10, 9, 4, 3, 23, 4, 7, 9, 14, 4, 6, 11, 10, 12, 4, 10, 12, 4, 19, 7, 15, 12, 73, 26])
        # fmt: on

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE, 'I', SPIECE_UNDERLINE, 'w', 'a', 's', SPIECE_UNDERLINE, 'b', 'o', 'r', 'n', SPIECE_UNDERLINE, 'i', 'n', SPIECE_UNDERLINE, '<unk>', ',', SPIECE_UNDERLINE, 'a', 'n', 'd', SPIECE_UNDERLINE, 't', 'h', 'i', 's', SPIECE_UNDERLINE, 'i', 's', SPIECE_UNDERLINE, 'f', 'a', 'l', 's', 'é', '.'])
    @slow
    def test_tokenizer_integration(self):
        # Use custom sequence because this tokenizer does not handle numbers.
        sequences = [
            "Transformers (formerly known as pytorch-transformers and pytorch-pretrained-bert) provides "
            "general-purpose architectures (BERT, GPT, RoBERTa, XLM, DistilBert, XLNet...) for Natural "
            "Language Understanding (NLU) and Natural Language Generation (NLG) with over thirty-two pretrained "
            "models in one hundred plus languages and deep interoperability between Jax, PyTorch and TensorFlow.",
            "BERT is designed to pre-train deep bidirectional representations from unlabeled text by jointly "
            "conditioning on both left and right context in all layers.",
            "The quick brown fox jumps over the lazy dog.",
        ]

        # fmt: off
        expected_encoding = {
            "input_ids": [
[4, 32, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 64, 19, 8, 13, 18, 5, 13, 15, 22, 4, 28, 9, 8, 20, 9, 4, 7, 12, 4, 24, 22, 6, 8, 13, 17, 11, 39, 6, 13, 7, 9, 12, 19, 8, 13, 18, 5, 13, 12, 4, 7, 9, 14, 4, 24, 22, 6, 8, 13, 17, 11, 39, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 39, 25, 5, 13, 6, 63, 4, 24, 13, 8, 27, 10, 14, 5, 12, 4, 21, 5, 9, 5, 13, 7, 15, 39, 24, 16, 13, 24, 8, 12, 5, 4, 7, 13, 17, 11, 10, 6, 5, 17, 6, 16, 13, 5, 12, 4, 64, 40, 47, 54, 32, 23, 4, 53, 49, 32, 23, 4, 54, 8, 40, 47, 54, 32, 7, 23, 4, 69, 52, 43, 23, 4, 51, 10, 12, 6, 10, 15, 40, 5, 13, 6, 23, 4, 69, 52, 48, 5, 6, 26, 26, 26, 63, 4, 19, 8, 13, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 61, 9, 14, 5, 13, 12, 6, 7, 9, 14, 10, 9, 21, 4, 64, 48, 52, 61, 63, 4, 7, 9, 14, 4, 48, 7, 6, 16, 13, 7, 15, 4, 52, 7, 9, 21, 16, 7, 21, 5, 4, 53, 5, 9, 5, 13, 7, 6, 10, 8, 9, 4, 64, 48, 52, 53, 63, 4, 20, 10, 6, 11, 4, 8, 27, 5, 13, 4, 6, 11, 10, 13, 6, 22, 39, 6, 20, 8, 4, 24, 13, 5, 6, 13, 7, 10, 9, 5, 14, 4, 18, 8, 14, 5, 15, 12, 4, 10, 9, 4, 8, 9, 5, 4, 11, 16, 9, 14, 13, 5, 14, 4, 24, 15, 16, 12, 4, 15, 7, 9, 21, 16, 7, 21, 5, 12, 4, 7, 9, 14, 4, 14, 5, 5, 24, 4, 10, 9, 6, 5, 13, 8, 24, 5, 13, 7, 25, 10, 15, 10, 6, 22, 4, 25, 5, 6, 20, 5, 5, 9, 4, 58, 7, 37, 23, 4, 49, 22, 32, 8, 13, 17, 11, 4, 7, 9, 14, 4, 32, 5, 9, 12, 8, 13, 55, 15, 8, 20, 26, 2],
[4, 40, 47, 54, 32, 4, 10, 12, 4, 14, 5, 12, 10, 21, 9, 5, 14, 4, 6, 8, 4, 24, 13, 5, 39, 6, 13, 7, 10, 9, 4, 14, 5, 5, 24, 4, 25, 10, 14, 10, 13, 5, 17, 6, 10, 8, 9, 7, 15, 4, 13, 5, 24, 13, 5, 12, 5, 9, 6, 7, 6, 10, 8, 9, 12, 4, 19, 13, 8, 18, 4, 16, 9, 15, 7, 25, 5, 15, 5, 14, 4, 6, 5, 37, 6, 4, 25, 22, 4, 46, 8, 10, 9, 6, 15, 22, 4, 17, 8, 9, 14, 10, 6, 10, 8, 9, 10, 9, 21, 4, 8, 9, 4, 25, 8, 6, 11, 4, 15, 5, 19, 6, 4, 7, 9, 14, 4, 13, 10, 21, 11, 6, 4, 17, 8, 9, 6, 5, 37, 6, 4, 10, 9, 4, 7, 15, 15, 4, 15, 7, 22, 5, 13, 12, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[4, 32, 11, 5, 4, 45, 16, 10, 17, 28, 4, 25, 13, 8, 20, 9, 4, 19, 8, 37, 4, 46, 16, 18, 24, 12, 4, 8, 27, 5, 13, 4, 6, 11, 5, 4, 15, 7, 57, 22, 4, 14, 8, 21, 26, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
],
            "attention_mask": [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
}
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="microsoft/speecht5_asr",
            revision="c5ef64c71905caeccde0e4462ef3f9077224c524",
            sequences=sequences,
        )
| 561 | 1 |
"""Convert a decimal value to its hexadecimal representation."""

values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}


def decimal_to_hexadecimal(decimal: float) -> str:
    """Take an integer-valued number and return its hexadecimal string, e.g. '0xff'."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
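# Examples (hedged): expected strings given the digit map and loop above.
#   decimal_to_hexadecimal(5)     # -> '0x5'
#   decimal_to_hexadecimal(15)    # -> '0xf'
#   decimal_to_hexadecimal(-256)  # -> '-0x100'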
if __name__ == "__main__":
import doctest
doctest.testmod() | 90 |
"""Convert XLM-RoBERTa-XL checkpoints from fairseq to the Hugging Face format."""

import argparse
import pathlib

import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version

from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
    BertIntermediate,
    BertLayer,
    BertOutput,
    BertSelfAttention,
    BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging


if version.parse(fairseq.__version__) < version.parse("1.0.0a"):
    raise Exception("requires fairseq >= 1.0.0a")

logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"


def convert_xlm_roberta_xl_checkpoint_to_pytorch(
    roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool
):
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings,
        hidden_size=roberta.cfg.model.encoder_embed_dim,
        num_hidden_layers=roberta.cfg.model.encoder_layers,
        num_attention_heads=roberta.cfg.model.encoder_attention_heads,
        intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim,
        max_position_embeddings=514,
        type_vocab_size=1,
        layer_norm_eps=1e-5,
    )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["mnli"].out_proj.weight.shape[0]

    print("Our RoBERTa config:", config)

    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()

    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = roberta_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = roberta_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight
    )  # just zero them out b/c RoBERTa doesn't use them.

    model.roberta.encoder.LayerNorm.weight = roberta_sent_encoder.layer_norm.weight
    model.roberta.encoder.LayerNorm.bias = roberta_sent_encoder.layer_norm.bias

    for i in range(config.num_hidden_layers):
        # Encoder: start of layer
        layer: BertLayer = model.roberta.encoder.layer[i]
        roberta_layer: TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]

        attention: RobertaAttention = layer.attention
        attention.self_attn_layer_norm.weight = roberta_layer.self_attn_layer_norm.weight
        attention.self_attn_layer_norm.bias = roberta_layer.self_attn_layer_norm.bias

        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        assert (
            roberta_layer.self_attn.k_proj.weight.data.shape
            == roberta_layer.self_attn.q_proj.weight.data.shape
            == roberta_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size))
        )

        self_attn.query.weight.data = roberta_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = roberta_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = roberta_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = roberta_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = roberta_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = roberta_layer.self_attn.v_proj.bias

        # self-attention output
        self_output: BertSelfOutput = layer.attention.output
        assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
        self_output.dense.weight = roberta_layer.self_attn.out_proj.weight
        self_output.dense.bias = roberta_layer.self_attn.out_proj.bias

        # this one is final layer norm
        layer.LayerNorm.weight = roberta_layer.final_layer_norm.weight
        layer.LayerNorm.bias = roberta_layer.final_layer_norm.bias

        # intermediate
        intermediate: BertIntermediate = layer.intermediate
        assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
        intermediate.dense.weight = roberta_layer.fc1.weight
        intermediate.dense.bias = roberta_layer.fc1.bias

        # output
        bert_output: BertOutput = layer.output
        assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
        bert_output.dense.weight = roberta_layer.fc2.weight
        bert_output.dense.bias = roberta_layer.fc2.bias
        # end of layer

    if classification_head:
        model.classifier.dense.weight = roberta.model.classification_heads["mnli"].dense.weight
        model.classifier.dense.bias = roberta.model.classification_heads["mnli"].dense.bias
        model.classifier.out_proj.weight = roberta.model.classification_heads["mnli"].out_proj.weight
        model.classifier.out_proj.bias = roberta.model.classification_heads["mnli"].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = roberta.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = roberta.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = roberta.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = roberta.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = roberta.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = roberta.model.encoder.lm_head.bias

    # Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1

    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["mnli"](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"max_absolute_diff = {max_absolute_diff}")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1e-3)
    print("Do both models output the same tensors?", "🔥" if success else "💩")
    if not success:
        raise Exception("Something went wRoNg")

    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--roberta_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--classification_head", action="store_true", help="Whether to convert a final classification head."
    )
    args = parser.parse_args()
    convert_xlm_roberta_xl_checkpoint_to_pytorch(
        args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
    )
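# Example invocation (hedged; paths are placeholders):
#   python convert_xlm_roberta_xl_original_pytorch_checkpoint_to_pytorch.py \
#       --roberta_checkpoint_path /path/to/fairseq/xlm-roberta-xl \
#       --pytorch_dump_folder_path ./xlm-roberta-xl-hf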
| 372 | 0 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional

from torch.utils.data import ConcatDataset

import transformers
from transformers import (
    CONFIG_MAPPING,
    MODEL_WITH_LM_HEAD_MAPPING,
    AutoConfig,
    AutoModelWithLMHead,
    AutoTokenizer,
    DataCollatorForLanguageModeling,
    DataCollatorForPermutationLanguageModeling,
    DataCollatorForWholeWordMask,
    HfArgumentParser,
    LineByLineTextDataset,
    LineByLineWithRefDataset,
    PreTrainedTokenizer,
    TextDataset,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
    """

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None,
        metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )


@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False,
        metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether ot not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer,
                    file_path=file_path,
                    block_size=args.block_size,
                    ref_path=ref_path,
                )

            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer,
                file_path=file_path,
                block_size=args.block_size,
                overwrite_cache=args.overwrite_cache,
                cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)


def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if data_args.eval_data_file is None and training_args.do_eval:
        raise ValueError(
            "Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
            "or remove the --do_eval argument."
        )

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    if model_args.config_name:
        config = AutoConfig.from_pretrained(model_args.config_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        config = AutoConfig.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        config = CONFIG_MAPPING[model_args.model_type]()
        logger.warning("You are instantiating a new config instance from scratch.")

    if model_args.tokenizer_name:
        tokenizer = AutoTokenizer.from_pretrained(model_args.tokenizer_name, cache_dir=model_args.cache_dir)
    elif model_args.model_name_or_path:
        tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path, cache_dir=model_args.cache_dir)
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
            " script, save it,and load it from here, using --tokenizer_name"
        )

    if model_args.model_name_or_path:
        model = AutoModelWithLMHead.from_pretrained(
            model_args.model_name_or_path,
            from_tf=bool(".ckpt" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )
    else:
        logger.info("Training new model from scratch")
        model = AutoModelWithLMHead.from_config(config)

    model.resize_token_embeddings(len(tokenizer))

    if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
        raise ValueError(
            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
            "--mlm flag (masked language modeling)."
        )

    if data_args.block_size <= 0:
        data_args.block_size = tokenizer.max_len
        # Our input block size will be the max possible for the model
    else:
        data_args.block_size = min(data_args.block_size, tokenizer.max_len)

    # Get datasets
    train_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, cache_dir=model_args.cache_dir) if training_args.do_train else None
    )
    eval_dataset = (
        get_dataset(data_args, tokenizer=tokenizer, evaluate=True, cache_dir=model_args.cache_dir)
        if training_args.do_eval
        else None
    )
    if config.model_type == "xlnet":
        data_collator = DataCollatorForPermutationLanguageModeling(
            tokenizer=tokenizer,
            plm_probability=data_args.plm_probability,
            max_span_length=data_args.max_span_length,
        )
    else:
        if data_args.mlm and data_args.whole_word_mask:
            data_collator = DataCollatorForWholeWordMask(
                tokenizer=tokenizer, mlm_probability=data_args.mlm_probability
            )
        else:
            data_collator = DataCollatorForLanguageModeling(
                tokenizer=tokenizer, mlm=data_args.mlm, mlm_probability=data_args.mlm_probability
            )

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        data_collator=data_collator,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        prediction_loss_only=True,
    )

    # Training
    if training_args.do_train:
        model_path = (
            model_args.model_name_or_path
            if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path)
            else None
        )
        trainer.train(model_path=model_path)
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_master():
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        eval_output = trainer.evaluate()

        perplexity = math.exp(eval_output["eval_loss"])
        result = {"perplexity": perplexity}

        output_eval_file = os.path.join(training_args.output_dir, "eval_results_lm.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key in sorted(result.keys()):
                    logger.info("  %s = %s", key, str(result[key]))
                    writer.write("%s = %s\n" % (key, str(result[key])))

        results.update(result)

    return results


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
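# Example invocation (hedged; file paths are placeholders):
#   python run_language_modeling.py --model_name_or_path roberta-base --mlm \
#       --train_data_file train.txt --eval_data_file eval.txt \
#       --do_train --do_eval --output_dir ./lm_output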
| 650 |
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 650 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timesformer"] = [
        "TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimesformerModel",
        "TimesformerForVideoClassification",
        "TimesformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timesformer import (
            TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimesformerForVideoClassification,
            TimesformerModel,
            TimesformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
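# Note (hedged): with _LazyModule, `from transformers.models.timesformer import
# TimesformerModel` only resolves the torch-backed submodule on first attribute
# access, so importing the package stays cheap when torch is unavailable.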
| 521 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )
import datasets
        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
] , )
        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
],
[
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_5_9, '''ymin''': 1_2_0, '''xmax''': 4_8_0, '''ymax''': 3_5_9}},
],
] , )
@require_torch
@slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
] , )
        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
],
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
],
] , )
@require_torch
@slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
] , )
        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
],
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 4_0, '''ymin''': 7_0, '''xmax''': 1_7_5, '''ymax''': 1_1_7}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_3_3, '''ymin''': 7_2, '''xmax''': 3_6_8, '''ymax''': 1_8_7}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_3_9, '''ymax''': 4_7_3}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
],
] , )
@require_torch
@slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 1_3, '''ymin''': 5_2, '''xmax''': 3_1_4, '''ymax''': 4_7_0}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_4_5, '''ymin''': 2_3, '''xmax''': 6_4_0, '''ymax''': 3_6_8}},
] , )
@require_torch
@require_pytesseract
@slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)
        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 2_9_4, '''ymin''': 2_5_4, '''xmax''': 3_4_3, '''ymax''': 2_6_4}},
{'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 2_9_4, '''ymin''': 2_5_4, '''xmax''': 3_4_3, '''ymax''': 2_6_4}},
] , )
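# End-user sketch of the pipeline exercised by these tests (the threshold value
# is illustrative; the model ID is the same public checkpoint used above):
#
#   from transformers import pipeline
#
#   detector = pipeline("object-detection", model="facebook/detr-resnet-50")
#   detections = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
#   # -> [{"score": float, "label": str, "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}, ...]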
| 521 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class MultiNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        mpi_options = {
"enabled": True,
"processes_per_host": 8,
}
        smp_options = {
"enabled": True,
"parameters": {
"microbatches": 4,
"placement_strategy": "spread",
"pipeline": "interleaved",
"optimize": "speed",
"partitions": 4,
"ddp": True,
},
}
_lowercase: str = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
_lowercase: Optional[Any] = "trainer" if self.script == "run_glue.py" else "smtrainer"
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}" , instance_count=_UpperCamelCase , instance_type=self.instance_type , debugger_hook_config=_UpperCamelCase , hyperparameters={
**self.env.hyperparameters,
"model_name_or_path": self.model_name_or_path,
"max_steps": 500,
} , metric_definitions=self.env.metric_definitions , distribution=_UpperCamelCase , py_version="py36" , )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
    @parameterized.expand([(1,)])
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
# run training
estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
assert all(t <= self.results["eval_loss"] for t in eval_loss)
# dump tests result into json file to share in PR
with open(f"{estimator.latest_training_job.name}.json" , "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
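# How this suite is gated and launched (hedged sketch: the env-var gate matches
# the skipif marker above, while the exact pytest invocation is an assumption):
#
#   TEST_SAGEMAKER=True python -m pytest -s tests/sagemaker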
| 713 |
def greatest_common_divisor(a: int, b: int) -> int:
    """Calculate the Greatest Common Divisor (GCD) recursively."""
    return abs(b) if a == 0 else greatest_common_divisor(b % a, a)


def gcd_by_iterative(x: int, y: int) -> int:
    """Calculate the GCD iteratively."""
    while y:  # --> when y=0 then loop will terminate and return x as final GCD.
        x, y = y, x % y
    return abs(x)


def main():
    try:
        nums = input("Enter two integers separated by comma (,): ").split(",")
        num_1 = int(nums[0])
        num_2 = int(nums[1])
        print(
            f"greatest_common_divisor({num_1}, {num_2}) = "
            f"{greatest_common_divisor(num_1, num_2)}"
        )
        print(f"By iterative gcd({num_1}, {num_2}) = {gcd_by_iterative(num_1, num_2)}")
    except (IndexError, UnboundLocalError, ValueError):
        print("Wrong input")
if __name__ == "__main__":
main()
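# Doctest-style sanity checks for the two implementations above (shown as
# comments so nothing runs on import):
#
#   >>> greatest_common_divisor(24, 40)
#   8
#   >>> gcd_by_iterative(24, 40)
#   8
#   >>> greatest_common_divisor(0, 7)  # gcd(0, b) == |b| by convention
#   7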
| 206 | 0 |
import socket
def main():
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)  # instantiate a TCP socket
    host = socket.gethostname()  # connect to this same machine
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")
if __name__ == "__main__":
main()
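# The client above expects a peer that accepts the connection and streams a
# file back. A minimal counterpart sketch under the same assumptions (same
# host/port; "send_this_file" is a hypothetical filename):
#
#   import socket
#
#   def serve() -> None:
#       server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#       server.bind((socket.gethostname(), 12312))
#       server.listen(1)
#       conn, _addr = server.accept()
#       conn.recv(1024)  # consume the client's greeting
#       with open("send_this_file", "rb") as in_file:
#           while chunk := in_file.read(1024):
#               conn.send(chunk)
#       conn.close()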
| 615 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_informer""": [
"""INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""InformerConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_informer"] = [
"""INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""InformerForPrediction""",
"""InformerModel""",
"""InformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 615 | 1 |
from pickle import UnpicklingError
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict
from ..utils import logging
logger = logging.get_logger(__name__)
def load_flax_checkpoint_in_pytorch_model(pt_model, model_file):
    try:
        with open(model_file, "rb") as flax_state_f:
            flax_state = from_bytes(None, flax_state_f.read())
except UnpicklingError as e:
try:
            with open(model_file) as f:
if f.read().startswith('version' ):
raise OSError(
'You seem to have cloned a repository without having git-lfs installed. Please'
' install git-lfs and run `git lfs install` followed by `git lfs pull` in the'
' folder you cloned.' )
else:
raise ValueError from e
except (UnicodeDecodeError, ValueError):
raise EnvironmentError(F"""Unable to convert {model_file} to Flax deserializable object. """ )
    return load_flax_weights_in_pytorch_model(pt_model, flax_state)


def load_flax_weights_in_pytorch_model(pt_model, flax_state):
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
    is_type_bf16 = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype == jnp.bfloat16, flax_state)).values()
    if any(is_type_bf16):
# convert all weights to fp32 if they are bf16 since torch.from_numpy can-not handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
        flax_state = jax.tree_util.tree_map(
            lambda params: params.astype(np.float32) if params.dtype == jnp.bfloat16 else params, flax_state
        )

    pt_model.base_model_prefix = ""

    flax_state_dict = flatten_dict(flax_state, sep=".")
    pt_model_dict = pt_model.state_dict()

    # keep track of unexpected & missing keys
    unexpected_keys = []
    missing_keys = set(pt_model_dict.keys())
for flax_key_tuple, flax_tensor in flax_state_dict.items():
        flax_key_tuple_array = flax_key_tuple.split(".")

        if flax_key_tuple_array[-1] == "kernel" and flax_tensor.ndim == 4:
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = jnp.transpose(flax_tensor, (3, 2, 0, 1))
        elif flax_key_tuple_array[-1] == "kernel":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
            flax_tensor = flax_tensor.T
        elif flax_key_tuple_array[-1] == "scale":
            flax_key_tuple_array = flax_key_tuple_array[:-1] + ["weight"]
if "time_embedding" not in flax_key_tuple_array:
            for i, flax_key_tuple_string in enumerate(flax_key_tuple_array):
                flax_key_tuple_array[i] = (
                    flax_key_tuple_string.replace("_0", ".0")
                    .replace("_1", ".1")
                    .replace("_2", ".2")
                    .replace("_3", ".3")
                    .replace("_4", ".4")
                    .replace("_5", ".5")
                    .replace("_6", ".6")
                    .replace("_7", ".7")
                    .replace("_8", ".8")
                    .replace("_9", ".9")
                )

        flax_key = ".".join(flax_key_tuple_array)
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
F"""Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected """
F"""to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.""" )
else:
# add weight to pytorch dict
                flax_tensor = np.asarray(flax_tensor) if not isinstance(flax_tensor, np.ndarray) else flax_tensor
                pt_model_dict[flax_key] = torch.from_numpy(flax_tensor)
                # remove from missing keys
                missing_keys.remove(flax_key)
else:
# weight is not expected by PyTorch model
            unexpected_keys.append(flax_key)

    pt_model.load_state_dict(pt_model_dict)

    # re-transform missing_keys to list
    missing_keys = list(missing_keys)

    if len(unexpected_keys) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
F""" {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing"""
F""" {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture"""
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
F""" IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect"""
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
    if len(missing_keys) > 0:
logger.warning(
F"""Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly"""
F""" initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to"""
' use it for predictions and inference.' )
    return pt_model
| 234 |
from __future__ import annotations
from itertools import permutations
from random import randint
from timeit import repeat
def make_dataset() -> tuple[list[int], int]:
    arr = [randint(-1000, 1000) for i in range(10)]
    r = randint(-5000, 5000)
    return (arr, r)


dataset = make_dataset()


def triplet_sum1(arr: list[int], target: int) -> tuple[int, ...]:
    """Brute force: check every 3-permutation of the array."""
    for triplet in permutations(arr, 3):
        if sum(triplet) == target:
            return tuple(sorted(triplet))
    return (0, 0, 0)
def triplet_sum2(arr: list[int], target: int) -> tuple[int, int, int]:
    """Optimized: sort, pin one element, then close a two-pointer window."""
    arr.sort()
    n = len(arr)
    for i in range(n - 1):
        left, right = i + 1, n - 1
        while left < right:
            if arr[i] + arr[left] + arr[right] == target:
                return (arr[i], arr[left], arr[right])
            elif arr[i] + arr[left] + arr[right] < target:
                left += 1
            elif arr[i] + arr[left] + arr[right] > target:
                right -= 1
    return (0, 0, 0)
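# Worked example for the two-pointer scan above (illustrative values): with
# arr = [1, 2, 4, 8] (already sorted) and target = 14, i = 0 pins arr[0] = 1;
# 1 + 2 + 8 = 11 < 14 moves `left` right, 1 + 4 + 8 = 13 < 14 closes the
# window. i = 1 then pins 2, and 2 + 4 + 8 = 14 matches, returning (2, 4, 8).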
def solution_times() -> tuple[float, float]:
    setup_code = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n"
    test_code1 = "\ntriplet_sum1(*dataset)\n"
    test_code2 = "\ntriplet_sum2(*dataset)\n"
    times1 = repeat(setup=setup_code, stmt=test_code1, repeat=5, number=10000)
    times2 = repeat(setup=setup_code, stmt=test_code2, repeat=5, number=10000)
    return (min(times1), min(times2))
if __name__ == "__main__":
from doctest import testmod
testmod()
    times = solution_times()
print(F'''The time for naive implementation is {times[0]}.''')
    print(F'''The time for optimized implementation is {times[1]}.''')
| 234 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bert"] = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_bert"] = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_bert"] = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 199 |
import math
def is_prime(number: int) -> bool:
    """Checks whether a number is prime in O(sqrt(n)) time."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(ratio: float = 0.1) -> int:
    """
    Returns the side length of the square spiral at which the ratio of primes
    along both diagonals first falls below `ratio`.
    """
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j
if __name__ == "__main__":
import doctest
doctest.testmod()
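# Illustration of the corner generation inside `solution` (not in the original):
# for the current side length j, the next spiral layer has side length j + 2 and
# range(j * j + j + 1, (j + 2) * (j + 2), j + 1) yields its three non-square
# corners. With j = 3 that is range(13, 25, 4) -> 13, 17, 21; the fourth corner,
# 25 = 5 ** 2, is a perfect square and can never be prime, so it is skipped.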
| 199 | 1 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class RagRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
a__ = os.path.join(self.tmpdirname ,"dpr_tokenizer" )
os.makedirs(a__ ,exist_ok=a__ )
a__ = os.path.join(a__ ,DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file ,"w" ,encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings" ,string_factory="Flat" ,metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_canonical_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname)
                retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)

        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_custom_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)

        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_custom_hf_index_retriever_retrieve_from_disk(self):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"])
        self.assertEqual(len(doc_dicts[0]["id"]), n_docs)
        self.assertEqual(doc_dicts[0]["id"][0], "1")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0], "0")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True)
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)

        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
    def test_legacy_index_retriever_retrieve(self):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs)
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertEqual(len(doc_dicts), 2)
        self.assertEqual(sorted(doc_dicts[0]), ["text", "title"])
        self.assertEqual(len(doc_dicts[0]["text"]), n_docs)
        self.assertEqual(doc_dicts[0]["text"][0], "bar")  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0], "foo")  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist(), [[1], [0]])
    def test_legacy_hf_index_retriever_save_and_from_pretrained(self):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname)
            retriever = RagRetriever.from_pretrained(tmp_dirname)
        self.assertIsInstance(retriever, RagRetriever)

        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever.retrieve(hidden_states, n_docs=1)
        self.assertTrue(out is not None)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_hf_index_retriever_call(self):
        import torch

        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, list)
        self.assertIsInstance(context_attention_mask, list)
        self.assertIsInstance(retrieved_doc_embeds, np.ndarray)

        out = retriever(
            question_input_ids,
            hidden_states,
            prefix=retriever.config.generator.prefix,
            n_docs=n_docs,
            return_tensors="pt",
        )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size))
        self.assertIsInstance(context_input_ids, torch.Tensor)
        self.assertIsInstance(context_attention_mask, torch.Tensor)
        self.assertIsInstance(retrieved_doc_embeds, torch.Tensor)
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False)
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer)

        question_input_ids = [[5, 7], [10, 11]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32
        )
        out = retriever(question_input_ids, hidden_states, prefix=retriever.config.generator.prefix, n_docs=n_docs)

        self.assertEqual(
            len(out), 6
        )  # check whether the retriever output consist of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask")), True
        )  # check for doc token related keys in dictionary.
| 394 |
def gray_code(bit_count: int) -> list:
    """Return the Gray code sequence for `bit_count` bits as integers."""
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert them to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)

    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Recursively build the Gray code sequence as binary strings."""
    if bit_count == 0:
        return ["0"]

    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # recursive answer will generate answer for n-1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)

    sequence = []

    # append 0 to first half of the smaller sequence generated
    for i in range(seq_len // 2):
        generated_no = "0" + smaller_sequence[i]
        sequence.append(generated_no)

    # append 1 to second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        generated_no = "1" + smaller_sequence[i]
        sequence.append(generated_no)

    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
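# Worked example (illustrative): gray_code_sequence_string(2) extends ["0", "1"]
# to ["00", "01", "11", "10"], so gray_code(2) returns [0, 1, 3, 2]; consecutive
# entries differ in exactly one bit, which is the defining Gray-code property.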
| 394 | 1 |
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n    - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n    - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n    - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n    - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n    - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n    - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n    - `0`: If there is a zero division, the return value is `0`.\n    - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n    Example 1-A simple example with some errors\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n        >>> print(results)\n        {\'recall\': 0.6666666666666666}\n\n    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n        >>> print(results)\n        {\'recall\': 0.5}\n\n    Example 3-The same example as Example 1, but with `sample_weight` included.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n        >>> print(results)\n        {\'recall\': 0.55}\n\n    Example 4-A multiclass example, using different averages.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'recall\': array([1., 0., 0.])}\n'
_CITATION = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""int32""" ) ),
"""references""": datasets.Sequence(datasets.Value("""int32""" ) ),
}
if self.config_name == """multilabel"""
else {
"""predictions""": datasets.Value("""int32""" ),
"""references""": datasets.Value("""int32""" ),
} ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""] , )
    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
| 200 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/xglm-564M": "https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/xglm-564M": 2048,
}
class XGLMTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        sp_model_kwargs=None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        # Compatibility with the original tokenizer
        self.num_madeup_words = 7
        madeup_words = [f"<madeupword{i}>" for i in range(self.num_madeup_words)]

        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            word for word in madeup_words if word not in kwargs["additional_special_tokens"]
        ]

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        sp_size = len(self.sp_model)
        madeup_words = {f"<madeupword{i}>": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words)}
        self.fairseq_tokens_to_ids.update(madeup_words)

        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.sep_token_id] + token_ids_0
        sep = [self.sep_token_id]
        return sep + token_ids_0 + sep + sep + token_ids_1

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0))
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1))

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]

        if token_ids_1 is None:
            return len(sep + token_ids_0) * [0]
        return len(sep + token_ids_0 + sep + sep + token_ids_1) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + self.num_madeup_words

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
| 200 | 1 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_beit import BeitImageProcessor
logger = logging.get_logger(__name__)
class BeitFeatureExtractor(BeitImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use BeitImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 709 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)

VAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 66 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class snake_case__ ( PipelineTool ):
    default_checkpoint = '''facebook/bart-large-mnli'''
    description = (
        '''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
        '''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
        '''It returns the most likely label in the list of provided `labels` for the input text.'''
    )
    name = '''text_classifier'''
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification
    inputs = ['''text''', ['''text''']]
    outputs = ['''text''']
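    # `inputs`/`outputs` above declare the tool's signature: a text plus a list of candidate labels in, one label out.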
    def setup( self ) -> None:
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("""entail""" ):
                self.entailment_id = int(idx )
        if self.entailment_id == -1:
            raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""" )
    def encode( self , text , labels ) -> Optional[Any]:
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels ) , [F'This example is {label}' for label in labels] , return_tensors="""pt""" , padding="""max_length""" , )
    def decode( self , outputs ) -> Dict:
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2] ).item()
        return self._labels[label_id]
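# Minimal usage sketch (hypothetical inputs; instantiating downloads the default checkpoint):
#   tool = snake_case__()
#   print(tool("This film was fantastic", ["positive", "negative"]))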
| 324 |
import math
def jump_search(arr, x):
    """simple docstring"""
    n = len(arr)
    step = int(math.floor(math.sqrt(n)))
    prev = 0
    # Jump ahead one block (of size sqrt(n)) at a time until the block that could hold x is found.
    while arr[min(step, n) - 1] < x:
        prev = step
        step += int(math.floor(math.sqrt(n)))
        if prev >= n:
            return -1
    # Linear scan inside the candidate block.
    while arr[prev] < x:
        prev = prev + 1
        if prev == min(step, n):
            return -1
    if arr[prev] == x:
        return prev
    return -1
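# Jump search assumes a sorted array; it does O(sqrt(n)) comparisons, sitting between linear scan and binary search.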
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]  # must already be sorted for jump search
    x = int(input("Enter the number to be searched:\n"))
    res = jump_search(arr, x)
    if res == -1:
        print("Number not found!")
    else:
        print(F"""Number {x} is at index {res}""")
| 324 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
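# enable_full_determinism() forces deterministic torch/CUDA kernels so the hard-coded slice assertions below are reproducible.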
class UpperCAmelCase__ ( PipelineLatentTesterMixin ,PipelineKarrasSchedulerTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
    pipeline_class = StableDiffusionInstructPixaPixPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
def A_ ( self : str ) -> List[Any]:
'''simple docstring'''
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
A = PNDMScheduler(skip_prk_steps=snake_case )
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
A = CLIPTextModel(snake_case )
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
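    # The deliberately tiny UNet/VAE/CLIP stack above keeps these fast tests cheap while still exercising the full pipeline wiring.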
def A_ ( self : str , snake_case : Optional[int] , snake_case : Optional[int]=0 ) -> Optional[int]:
'''simple docstring'''
A = floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case ) ).to(snake_case )
A = image.cpu().permute(0 , 2 , 3 , 1 )[0]
A = Image.fromarray(np.uinta(snake_case ) ).convert('RGB' )
if str(snake_case ).startswith('mps' ):
A = torch.manual_seed(snake_case )
else:
A = torch.Generator(device=snake_case ).manual_seed(snake_case )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def A_ ( self : int ) -> List[Any]:
'''simple docstring'''
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = StableDiffusionInstructPixaPixPipeline(**snake_case )
A = sd_pipe.to(snake_case )
sd_pipe.set_progress_bar_config(disable=snake_case )
A = self.get_dummy_inputs(snake_case )
A = sd_pipe(**snake_case ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def A_ ( self : Any ) -> Any:
'''simple docstring'''
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = StableDiffusionInstructPixaPixPipeline(**snake_case )
A = sd_pipe.to(snake_case )
sd_pipe.set_progress_bar_config(disable=snake_case )
A = self.get_dummy_inputs(snake_case )
A = 'french fries'
A = sd_pipe(**snake_case , negative_prompt=snake_case )
A = output.images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
A = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def A_ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = StableDiffusionInstructPixaPixPipeline(**snake_case )
A = sd_pipe.to(snake_case )
sd_pipe.set_progress_bar_config(disable=snake_case )
A = self.get_dummy_inputs(snake_case )
A = [inputs['prompt']] * 2
A = np.array(inputs['image'] ).astype(np.floataa ) / 255.0
A = torch.from_numpy(snake_case ).unsqueeze(0 ).to(snake_case )
A = image / 2 + 0.5
A = image.permute(0 , 3 , 1 , 2 )
A = image.repeat(2 , 1 , 1 , 1 )
A = sd_pipe(**snake_case ).images
A = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
A = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def A_ ( self : Optional[int] ) -> str:
'''simple docstring'''
A = 'cpu' # ensure determinism for the device-dependent torch.Generator
A = self.get_dummy_components()
A = EulerAncestralDiscreteScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' )
A = StableDiffusionInstructPixaPixPipeline(**snake_case )
A = sd_pipe.to(snake_case )
sd_pipe.set_progress_bar_config(disable=snake_case )
A = self.get_dummy_inputs(snake_case )
A = sd_pipe(**snake_case ).images
A = image[0, -3:, -3:, -1]
A = [round(snake_case , 4 ) for x in image_slice.flatten().tolist()]
print(','.join([str(snake_case ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
A = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def A_ ( self : int ) -> List[Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def A_ ( self : Any ) -> Any:
'''simple docstring'''
A = self.get_dummy_components()
A = StableDiffusionInstructPixaPixPipeline(**snake_case )
A = VaeImageProcessor(do_resize=snake_case , do_normalize=snake_case )
A = pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
A = pipe(**self.get_dummy_inputs_by_type(snake_case , input_image_type='pt' ) )[0]
A = components['vae']
A = self.get_dummy_inputs_by_type(snake_case , input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
A = vae.encode(inputs[image_param] ).latent_dist.mode()
A = pipe(**snake_case )[0]
A = np.abs(out - out_latents_inputs ).max()
self.assertLess(snake_case , 1E-4 , 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
def A_ ( self : Optional[int] ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A_ ( self : Any , snake_case : List[Any]=0 ) -> Optional[Any]:
'''simple docstring'''
A = torch.manual_seed(snake_case )
A = load_image(
            'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
A = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def A_ ( self : Union[str, Any] ) -> Tuple:
'''simple docstring'''
A = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=snake_case )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
pipe.enable_attention_slicing()
A = self.get_inputs()
A = pipe(**snake_case ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
A = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def A_ ( self : List[str] ) -> Optional[int]:
'''simple docstring'''
A = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=snake_case )
A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
pipe.enable_attention_slicing()
A = self.get_inputs()
A = pipe(**snake_case ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
A = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def A_ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
A = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=snake_case )
A = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
pipe.enable_attention_slicing()
A = self.get_inputs()
A = pipe(**snake_case ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 512, 512, 3)
A = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def A_ ( self : List[Any] ) -> Any:
'''simple docstring'''
A = 0
def callback_fn(snake_case : int , snake_case : int , snake_case : torch.FloatTensor ) -> None:
A = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
A = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
A = latents[0, -3:, -3:, -1]
A = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
A = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
A = latents[0, -3:, -3:, -1]
A = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
A = False
A = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=snake_case , torch_dtype=torch.floataa )
A = pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
pipe.enable_attention_slicing()
A = self.get_inputs()
pipe(**snake_case , callback=snake_case , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def A_ ( self : Union[str, Any] ) -> int:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=snake_case , torch_dtype=torch.floataa )
A = pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
A = self.get_inputs()
A = pipe(**snake_case )
A = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def A_ ( self : int ) -> Optional[int]:
'''simple docstring'''
A = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
A = inputs['image'].resize((504, 504) )
A = 'timbrooks/instruct-pix2pix'
A = StableDiffusionInstructPixaPixPipeline.from_pretrained(
snake_case , safety_checker=snake_case , )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
pipe.enable_attention_slicing()
A = pipe(**snake_case )
A = output.images[0]
A = image[255:258, 383:386, -1]
assert image.shape == (504, 504, 3)
A = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 717 |
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
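# MODEL_TYPES feeds the model_type choices surfaced in the dataclass help string below.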
@dataclass
class SquadDataTrainingArguments :
    model_type: str = field(
        default=None ,metadata={"""help""": """Model type selected in the list: """ + """, """.join(MODEL_TYPES )} )
    data_dir: str = field(
        default=None ,metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} )
    max_seq_length: int = field(
        default=1_28 ,metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } ,)
    doc_stride: int = field(
        default=1_28 ,metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} ,)
    max_query_length: int = field(
        default=64 ,metadata={
            """help""": (
                """The maximum number of tokens for the question. Questions longer than this will """
                """be truncated to this length."""
            )
        } ,)
    max_answer_length: int = field(
        default=30 ,metadata={
            """help""": (
                """The maximum length of an answer that can be generated. This is needed because the start """
                """and end predictions are not conditioned on one another."""
            )
        } ,)
    overwrite_cache: bool = field(
        default=False ,metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    version_2_with_negative: bool = field(
        default=False ,metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} )
    null_score_diff_threshold: float = field(
        default=0.0 ,metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
    n_best_size: int = field(
        default=20 ,metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
    lang_id: int = field(
        default=0 ,metadata={
            """help""": (
                """language id of input for language-specific xlm models (see"""
                """ tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"""
            )
        } ,)
    threads: int = field(default=1 ,metadata={"""help""": """multiple threads for converting example to features"""} )
class Split( Enum ):
    train = """train"""
    dev = """dev"""
class SquadDataset( Dataset ):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__( self , args : SquadDataTrainingArguments , tokenizer : PreTrainedTokenizer , limit_length : Optional[int] = None , mode : Union[str, Split] = Split.train , is_language_sensitive : Optional[bool] = False , cache_dir : Optional[str] = None , dataset_format : Optional[str] = "pt" , ) -> None:
        '''simple docstring'''
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name' )
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = 'v2' if args.version_2_with_negative else 'v1'
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}""" , )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features['features']
                self.dataset = self.old_features.get('dataset' , None )
                self.examples = self.old_features.get('examples' , None )
                logger.info(
                    f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"""Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"""
                        ' future run' )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir )
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir )
                self.features , self.dataset = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=tokenizer , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=dataset_format , )
                start = time.time()
                torch.save(
                    {'features': self.features, 'dataset': self.dataset, 'examples': self.examples} , cached_features_file , )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" )
def __len__( self : Any ) -> Dict:
'''simple docstring'''
return len(self.features )
    def __getitem__( self , i : int ) -> Dict[str, torch.Tensor]:
        '''simple docstring'''
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids , dtype=torch.long )
        attention_mask = torch.tensor(feature.attention_mask , dtype=torch.long )
        token_type_ids = torch.tensor(feature.token_type_ids , dtype=torch.long )
        cls_index = torch.tensor(feature.cls_index , dtype=torch.long )
        p_mask = torch.tensor(feature.p_mask , dtype=torch.float )
        is_impossible = torch.tensor(feature.is_impossible , dtype=torch.float )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': attention_mask,
            'token_type_ids': token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({'cls_index': cls_index, 'p_mask': p_mask} )
        if self.args.version_2_with_negative:
            inputs.update({'is_impossible': is_impossible} )
        if self.is_language_sensitive:
            inputs.update({'langs': (torch.ones(input_ids.shape , dtype=torch.int64 ) * self.args.lang_id)} )
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position , dtype=torch.long )
            end_positions = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({'start_positions': start_positions, 'end_positions': end_positions} )
        return inputs
| 109 | 0 |
"""simple docstring"""
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_module ( module ):
    '''simple docstring'''
    for param in module.parameters():
        param.requires_grad = False
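# Typical use: call this helper on a pretrained backbone to freeze it before training a new head on top.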
def get_device ( ):
    '''simple docstring'''
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations." )
    return device
def show_image ( image ):
    '''simple docstring'''
    fig = plt.imshow(image )
    fig.axes.get_xaxis().set_visible(False )
    fig.axes.get_yaxis().set_visible(False )
    plt.show()
def get_timestamp ( ):
    '''simple docstring'''
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S" )
    return timestamp
| 595 |
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)
STOPPING_CRITERIA_INPUTS_DOCSTRING = r'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class StoppingCriteria( ABC ):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids : torch.LongTensor , scores : torch.FloatTensor , **kwargs ) ->bool:
        """simple docstring"""
        raise NotImplementedError("StoppingCriteria needs to be subclassed" )
class MaxLengthCriteria( StoppingCriteria ):
    def __init__( self , max_length : int , max_position_embeddings : Optional[int] = None ) ->None:
        """simple docstring"""
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids : torch.LongTensor , scores : torch.FloatTensor , **kwargs ) ->bool:
        """simple docstring"""
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                F'''maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '''
                "exceptions, performance degradation, or nothing at all." )
        return is_done
class MaxNewTokensCriteria( StoppingCriteria ):
    def __init__( self , start_length : int , max_new_tokens : int ) ->None:
        """simple docstring"""
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            F'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '''
            "with `max_length = start_length + max_new_tokens` instead." , FutureWarning , )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids : torch.LongTensor , scores : torch.FloatTensor , **kwargs ) ->bool:
        """simple docstring"""
        return input_ids.shape[-1] >= self.max_length
class MaxTimeCriteria( StoppingCriteria ):
    def __init__( self , max_time : float , initial_timestamp : Optional[float] = None ) ->None:
        """simple docstring"""
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids : torch.LongTensor , scores : torch.FloatTensor , **kwargs ) ->bool:
        """simple docstring"""
        return time.time() - self.initial_timestamp > self.max_time
class StoppingCriteriaList( list ):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING )
    def __call__( self , input_ids : torch.LongTensor , scores : torch.FloatTensor , **kwargs ) ->bool:
        """simple docstring"""
        return any(criteria(input_ids , scores ) for criteria in self )
    @property
    def max_length( self ) ->Optional[int]:
        """simple docstring"""
        for stopping_criterium in self:
            if isinstance(stopping_criterium , MaxLengthCriteria ):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium , MaxNewTokensCriteria ):
                return stopping_criterium.max_length
        return None
def validate_stopping_criteria( stopping_criteria : StoppingCriteriaList , max_length : int ) -> StoppingCriteriaList:
    """simple docstring"""
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria )
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , UserWarning )
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length ) )
    return new_stopping_criteria
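# Minimal usage sketch inside a generation loop (hypothetical tensors):
#   criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=50), MaxTimeCriteria(max_time=10.0)])
#   if criteria(input_ids, scores):
#       break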
| 395 | 0 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
a_ = TypeVar("""T""")
class __lowerCAmelCase ( Generic[T] ):
def __init__( self , __UpperCAmelCase ):
'''simple docstring'''
__lowerCamelCase = data
__lowerCamelCase = None
def __str__( self ):
'''simple docstring'''
return F"""{self.data}"""
class Stack( Generic[T] ):
    def __init__( self ):
        '''simple docstring'''
        self.top = None
    def __iter__( self ):
        '''simple docstring'''
        node = self.top
        while node:
            yield node.data
            node = node.next
    def __str__( self ):
        '''simple docstring'''
        return "->".join([str(item ) for item in self] )
    def __len__( self ):
        '''simple docstring'''
        return len(tuple(iter(self ) ) )
    def is_empty( self ):
        '''simple docstring'''
        return self.top is None
    def push( self , item ):
        '''simple docstring'''
        node = Node(item )
        if not self.is_empty():
            node.next = self.top
        self.top = node
    def pop( self ):
        '''simple docstring'''
        if self.is_empty():
            raise IndexError('''pop from empty stack''' )
        assert isinstance(self.top , Node )
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data
    def peek( self ):
        '''simple docstring'''
        if self.is_empty():
            raise IndexError('''peek from empty stack''' )
        assert self.top is not None
        return self.top.data
    def clear( self ):
        '''simple docstring'''
        self.top = None
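# Example LIFO behavior: s = Stack(); s.push(1); s.push(2); s.pop() returns 2, then s.pop() returns 1.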
if __name__ == "__main__":
from doctest import testmod
testmod()
| 622 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = """cuda""" if torch.cuda.is_available() else """cpu"""
def generate_summaries_or_translations( examples : List[str] ,out_file : str ,model_name : str ,batch_size : int = 8 ,device : str = DEFAULT_DEVICE ,fp16 : bool = False ,task : str = '''summarization''' ,prefix=None ,**generate_kwargs ,) -> Dict:
    fout = Path(out_file ).open('''w''' ,encoding='''utf-8''' )
    model_name = str(model_name )
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name ).to(device )
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    logger.info(F"""Inferred tokenizer type: {tokenizer.__class__}""" ) # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model ,task )
    if prefix is None:
        prefix = prefix or getattr(model.config ,'''prefix''' ,'''''' ) or ''''''
    for examples_chunk in tqdm(list(chunks(examples ,batch_size ) ) ):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk ,return_tensors='''pt''' ,truncation=True ,padding='''longest''' ).to(device )
        summaries = model.generate(
            input_ids=batch.input_ids ,attention_mask=batch.attention_mask ,**generate_kwargs ,)
        dec = tokenizer.batch_decode(summaries ,skip_special_tokens=True ,clean_up_tokenization_spaces=False )
        for hypothesis in dec:
            fout.write(hypothesis + '''\n''' )
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time ) # seconds
    n_obs = len(examples )
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs ,4 )}
def datetime_now():
    return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def run_generate( verbose : bool = True ):
    parser = argparse.ArgumentParser()
    parser.add_argument('''model_name''' ,type=str ,help='''like facebook/bart-large-cnn,t5-base, etc.''' )
    parser.add_argument('''input_path''' ,type=str ,help='''like cnn_dm/test.source''' )
    parser.add_argument('''save_path''' ,type=str ,help='''where to save summaries''' )
    parser.add_argument('''--reference_path''' ,type=str ,required=False ,help='''like cnn_dm/test.target''' )
    parser.add_argument('''--score_path''' ,type=str ,required=False ,default='''metrics.json''' ,help='''where to save metrics''' )
    parser.add_argument('''--device''' ,type=str ,required=False ,default=DEFAULT_DEVICE ,help='''cuda, cuda:1, cpu etc.''' )
    parser.add_argument(
        '''--prefix''' ,type=str ,required=False ,default=None ,help='''will be added to the beginning of src examples''' )
    parser.add_argument('''--task''' ,type=str ,default='''summarization''' ,help='''used for task_specific_params + metrics''' )
    parser.add_argument('''--bs''' ,type=int ,default=8 ,required=False ,help='''batch size''' )
    parser.add_argument(
        '''--n_obs''' ,type=int ,default=-1 ,required=False ,help='''How many observations. Defaults to all.''' )
    parser.add_argument('''--fp16''' ,action='''store_true''' )
    parser.add_argument('''--dump-args''' ,action='''store_true''' ,help='''print the custom hparams with the results''' )
    parser.add_argument(
        '''--info''' ,nargs='''?''' ,type=str ,const=datetime_now() ,help=(
            '''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
            ''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
        ) ,)
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest )
    if parsed_args and verbose:
        print(F"""parsed the following generate kwargs: {parsed_args}""" )
    examples = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path ).parent.mkdir(exist_ok=True )
    if args.reference_path is None and Path(args.score_path ).exists():
        warnings.warn(F"""score_path {args.score_path} will be overwritten unless you type ctrl-c.""" )
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
    runtime_metrics = generate_summaries_or_translations(
        examples ,args.save_path ,args.model_name ,batch_size=args.bs ,device=args.device ,fp16=args.fp16 ,task=args.task ,prefix=args.prefix ,**parsed_args ,)
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if '''translation''' in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path ).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(output_lns )]
    scores = score_fn(output_lns ,reference_lns )
    scores.update(runtime_metrics )
    if args.dump_args:
        scores.update(parsed_args )
    if args.info:
        scores['''info'''] = args.info
    if verbose:
        print(scores )
    if args.score_path is not None:
        json.dump(scores ,open(args.score_path ,'''w''' ) )
    return scores
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 622 | 1 |
'''simple docstring'''
def molarity_to_normality(nfactor : int ,moles : float ,volume : float ):
    return round(float(moles / volume ) * nfactor )
def moles_to_pressure(volume : float ,moles : float ,temperature : float ):
    return round(float((moles * 0.0821 * temperature) / (volume) ) )
def moles_to_volume(pressure : float ,moles : float ,temperature : float ):
    return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def pressure_and_volume_to_temperature(pressure : float ,moles : float ,volume : float ):
    return round(float((pressure * volume) / (0.0821 * moles) ) )
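# 0.0821 L·atm/(mol·K) is the ideal gas constant R used in the PV = nRT rearrangements above.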
if __name__ == "__main__":
import doctest
doctest.testmod()
| 128 |
'''simple docstring'''
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
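# Each reader test below round-trips a small parquet file and checks that rows, columns, and dtypes survive.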
def _check_parquet_dataset( dataset ,expected_features ):
    assert isinstance(dataset ,Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' ,[False, True] )
def lowerCAmelCase__ ( lowerCamelCase : Union[str, Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Optional[Any] ):
_A : Union[str, Any] = tmp_path / 'cache'
_A : Tuple = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_A : Optional[Any] = ParquetDatasetReader(lowerCamelCase ,cache_dir=lowerCamelCase ,keep_in_memory=lowerCamelCase ).read()
_check_parquet_dataset(lowerCamelCase ,lowerCamelCase )
@pytest.mark.parametrize(
'features' ,[
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] ,)
def lowerCAmelCase__ ( lowerCamelCase : Optional[Any] ,lowerCamelCase : str ,lowerCamelCase : List[str] ):
_A : List[str] = tmp_path / 'cache'
_A : Optional[int] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_A : List[str] = features.copy() if features else default_expected_features
_A : List[Any] = (
Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
_A : Union[str, Any] = ParquetDatasetReader(lowerCamelCase ,features=lowerCamelCase ,cache_dir=lowerCamelCase ).read()
_check_parquet_dataset(lowerCamelCase ,lowerCamelCase )
@pytest.mark.parametrize('split' ,[None, NamedSplit('train' ), 'train', 'test'] )
def lowerCAmelCase__ ( lowerCamelCase : Tuple ,lowerCamelCase : Optional[int] ,lowerCamelCase : str ):
_A : Dict = tmp_path / 'cache'
_A : Optional[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_A : str = ParquetDatasetReader(lowerCamelCase ,cache_dir=lowerCamelCase ,split=lowerCamelCase ).read()
_check_parquet_dataset(lowerCamelCase ,lowerCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' ,[str, list] )
def lowerCAmelCase__ ( lowerCamelCase : Dict ,lowerCamelCase : Dict ,lowerCamelCase : Tuple ):
if issubclass(lowerCamelCase ,lowerCamelCase ):
_A : Tuple = parquet_path
elif issubclass(lowerCamelCase ,lowerCamelCase ):
_A : List[Any] = [parquet_path]
_A : Optional[int] = tmp_path / 'cache'
_A : Optional[int] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_A : List[str] = ParquetDatasetReader(lowerCamelCase ,cache_dir=lowerCamelCase ).read()
_check_parquet_dataset(lowerCamelCase ,lowerCamelCase )
def _check_parquet_datasetdict( dataset_dict ,expected_features ,splits=("train",) ):
    assert isinstance(dataset_dict ,DatasetDict )
for split in splits:
_A : str = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' ,[False, True] )
def lowerCAmelCase__ ( lowerCamelCase : Optional[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Optional[int] ):
_A : str = tmp_path / 'cache'
_A : int = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_A : Dict = ParquetDatasetReader(
{'train': parquet_path} ,cache_dir=lowerCamelCase ,keep_in_memory=lowerCamelCase ).read()
_check_parquet_datasetdict(lowerCamelCase ,lowerCamelCase )
@pytest.mark.parametrize(
'features' ,[
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] ,)
def lowerCAmelCase__ ( lowerCamelCase : str ,lowerCamelCase : str ,lowerCamelCase : Optional[int] ):
_A : List[Any] = tmp_path / 'cache'
_A : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_A : str = features.copy() if features else default_expected_features
_A : Tuple = (
Features({feature: Value(lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
_A : List[str] = ParquetDatasetReader({'train': parquet_path} ,features=lowerCamelCase ,cache_dir=lowerCamelCase ).read()
_check_parquet_datasetdict(lowerCamelCase ,lowerCamelCase )
@pytest.mark.parametrize('split' ,[None, NamedSplit('train' ), 'train', 'test'] )
def lowerCAmelCase__ ( lowerCamelCase : str ,lowerCamelCase : List[str] ,lowerCamelCase : Dict ):
if split:
_A : Optional[int] = {split: parquet_path}
else:
_A : Tuple = 'train'
_A : Union[str, Any] = {'train': parquet_path, 'test': parquet_path}
_A : str = tmp_path / 'cache'
_A : Optional[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
_A : Optional[int] = ParquetDatasetReader(lowerCamelCase ,cache_dir=lowerCamelCase ).read()
_check_parquet_datasetdict(lowerCamelCase ,lowerCamelCase ,splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def lowerCAmelCase__ ( lowerCamelCase : Union[str, Any] ,lowerCamelCase : Tuple ):
_A : int = ParquetDatasetWriter(lowerCamelCase ,tmp_path / 'foo.parquet' )
assert writer.write() > 0
_A : Optional[Any] = pq.ParquetFile(tmp_path / 'foo.parquet' )
_A : Tuple = pf.read()
assert dataset.data.table == output_table
def lowerCAmelCase__ ( lowerCamelCase : List[Any] ,lowerCamelCase : str ):
_A : str = str(shared_datadir / 'test_image_rgb.jpg' )
_A : Optional[Any] = {'image': [image_path]}
_A : Optional[int] = Features({'image': Image()} )
_A : Union[str, Any] = Dataset.from_dict(lowerCamelCase ,features=lowerCamelCase )
_A : List[str] = ParquetDatasetWriter(lowerCamelCase ,tmp_path / 'foo.parquet' )
assert writer.write() > 0
_A : List[str] = Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) )
assert dataset.features == reloaded_dataset.features
_A : Optional[Any] = ParquetDatasetReader(str(tmp_path / 'foo.parquet' ) ,streaming=lowerCamelCase ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'feature, expected' ,[
(Features({'foo': Value('int32' )} ), None),
(Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] ,)
def lowerCAmelCase__ ( lowerCamelCase : Tuple ,lowerCamelCase : List[Any] ):
assert get_writer_batch_size(lowerCamelCase ) == expected
| 128 | 1 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class DistilBertModelTester ( object ):
def __init__( self : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any]=13 , _UpperCAmelCase : int=7 , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : List[str]=False , _UpperCAmelCase : Optional[int]=True , _UpperCAmelCase : List[Any]=99 , _UpperCAmelCase : Union[str, Any]=32 , _UpperCAmelCase : Optional[int]=5 , _UpperCAmelCase : Union[str, Any]=4 , _UpperCAmelCase : List[str]=37 , _UpperCAmelCase : Optional[int]="gelu" , _UpperCAmelCase : Any=0.1 , _UpperCAmelCase : List[Any]=0.1 , _UpperCAmelCase : Union[str, Any]=512 , _UpperCAmelCase : Tuple=16 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : List[Any]=0.02 , _UpperCAmelCase : Tuple=3 , _UpperCAmelCase : Dict=4 , _UpperCAmelCase : List[Any]=None , ) -> Any:
'''simple docstring'''
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : Tuple = batch_size
_lowerCAmelCase : Optional[int] = seq_length
_lowerCAmelCase : Optional[Any] = is_training
_lowerCAmelCase : int = use_input_mask
_lowerCAmelCase : Any = use_token_type_ids
_lowerCAmelCase : Any = use_labels
_lowerCAmelCase : Any = vocab_size
_lowerCAmelCase : Optional[Any] = hidden_size
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : List[str] = intermediate_size
_lowerCAmelCase : Any = hidden_act
_lowerCAmelCase : Dict = hidden_dropout_prob
_lowerCAmelCase : str = attention_probs_dropout_prob
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : Optional[int] = type_vocab_size
_lowerCAmelCase : Dict = type_sequence_label_size
_lowerCAmelCase : Optional[Any] = initializer_range
_lowerCAmelCase : Union[str, Any] = num_labels
_lowerCAmelCase : Optional[Any] = num_choices
_lowerCAmelCase : int = scope
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
'''simple docstring'''
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Union[str, Any] = None
if self.use_input_mask:
_lowerCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase : Any = None
_lowerCAmelCase : List[Any] = None
_lowerCAmelCase : Tuple = None
if self.use_labels:
_lowerCAmelCase : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_lowerCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_choices )
_lowerCAmelCase : Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
'''simple docstring'''
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Any ) -> str:
'''simple docstring'''
_lowerCAmelCase : Any = DistilBertModel(config=__a )
model.to(__a )
model.eval()
_lowerCAmelCase : Dict = model(__a , __a )
_lowerCAmelCase : str = model(__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE ( self : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Any ) -> int:
'''simple docstring'''
_lowerCAmelCase : List[Any] = DistilBertForMaskedLM(config=__a )
model.to(__a )
model.eval()
_lowerCAmelCase : Optional[Any] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE ( self : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
_lowerCAmelCase : Union[str, Any] = DistilBertForQuestionAnswering(config=__a )
model.to(__a )
model.eval()
_lowerCAmelCase : Union[str, Any] = model(
__a , attention_mask=__a , start_positions=__a , end_positions=__a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : int , _UpperCAmelCase : List[str] ) -> Optional[int]:
'''simple docstring'''
_lowerCAmelCase : str = self.num_labels
_lowerCAmelCase : List[str] = DistilBertForSequenceClassification(__a )
model.to(__a )
model.eval()
_lowerCAmelCase : List[Any] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict ) -> str:
'''simple docstring'''
_lowerCAmelCase : Optional[int] = self.num_labels
_lowerCAmelCase : Dict = DistilBertForTokenClassification(config=__a )
model.to(__a )
model.eval()
_lowerCAmelCase : Optional[int] = model(__a , attention_mask=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE ( self : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
_lowerCAmelCase : Tuple = self.num_choices
_lowerCAmelCase : List[Any] = DistilBertForMultipleChoice(config=__a )
model.to(__a )
model.eval()
_lowerCAmelCase : Tuple = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_lowerCAmelCase : int = model(
__a , attention_mask=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class __snake_case (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
lowerCAmelCase__ = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
else None
)
lowerCAmelCase__ = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = True
def SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
'''simple docstring'''
_lowerCAmelCase : Tuple = DistilBertModelTester(self )
_lowerCAmelCase : Optional[Any] = ConfigTester(self , config_class=__a , dim=37 )
def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__a )
def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__a )
def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
'''simple docstring'''
_lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__a )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
'''simple docstring'''
_lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__a )
def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
'''simple docstring'''
_lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__a )
def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
'''simple docstring'''
_lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__a )
@slow
def SCREAMING_SNAKE_CASE ( self : str ) -> str:
'''simple docstring'''
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[int] = DistilBertModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@slow
@require_torch_gpu
def SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
'''simple docstring'''
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# BertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
_lowerCAmelCase : Tuple = True
_lowerCAmelCase : Tuple = model_class(config=__a )
_lowerCAmelCase : int = self._prepare_for_class(__a , __a )
_lowerCAmelCase : str = torch.jit.trace(
__a , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__a , os.path.join(__a , """traced_model.pt""" ) )
_lowerCAmelCase : Optional[int] = torch.jit.load(os.path.join(__a , """traced_model.pt""" ) , map_location=__a )
loaded(inputs_dict["""input_ids"""].to(__a ) , inputs_dict["""attention_mask"""].to(__a ) )
@require_torch
class __snake_case (unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
'''simple docstring'''
_lowerCAmelCase : List[Any] = DistilBertModel.from_pretrained("""distilbert-base-uncased""" )
_lowerCAmelCase : int = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
_lowerCAmelCase : Any = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_lowerCAmelCase : Any = model(__a , attention_mask=__a )[0]
_lowerCAmelCase : str = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __a )
_lowerCAmelCase : Optional[int] = torch.tensor(
[[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1E-4 ) )
| 707 |
def prefix_function (input_string : str ):
    '''simple docstring'''
    prefix_result = [0] * len(input_string )
    for i in range(1 , len(input_string ) ):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix (input_string : str ):
    '''simple docstring'''
    return max(prefix_function(input_string ) )
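# prefix_function computes the KMP failure table: prefix_result[i] is the length of the longest
# proper prefix of input_string[: i + 1] that is also a suffix of it.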
if __name__ == "__main__":
import doctest
doctest.testmod()
| 196 | 0 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig ( XLMRobertaConfig ):
    model_type = """M-CLIP"""
    def __init__( self , transformerDimSize=1_024 , imageDimSize=768 , **kwargs ) -> None:
        '''simple docstring'''
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs )
class MultilingualCLIP ( PreTrainedModel ):
    config_class = MCLIPConfig
    def __init__( self , config , *args , **kwargs ) -> None:
        '''simple docstring'''
        super().__init__(config , *args , **kwargs )
        self.transformer = XLMRobertaModel(config )
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions , out_features=config.numDims )
def A__ ( self , lowerCAmelCase , lowerCAmelCase ) -> Optional[int]:
'''simple docstring'''
_lowercase =self.transformer(input_ids=lowerCAmelCase , attention_mask=lowerCAmelCase )[0]
_lowercase =(embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(lowerCAmelCase ), embs
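
# Usage sketch (added for illustration; the checkpoint name is an assumption,
# not taken from the snippet above):
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
#   model = MultilingualCLIP.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
#   batch = tokenizer(["a photo of a cat"], return_tensors="pt", padding=True)
#   projected, raw_embeddings = model(batch["input_ids"], batch["attention_mask"])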
| 291 |
from __future__ import annotations

from statistics import mean


def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    """Calculate the waiting time of each process under (non-preemptive)
    shortest-job-first scheduling."""
    waiting_time = [0] * no_of_processes
    remaining_time = [0] * no_of_processes
    # Initialize remaining_time to burst_time.
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]
    ready_process: list[int] = []
    completed = 0
    total_time = 0
    # While processes are not completed,
    # a process whose arrival time has passed
    # and that has remaining execution time is put into ready_process.
    # The shortest process in ready_process, target_process, is executed.
    while completed != no_of_processes:
        ready_process = []
        target_process = -1
        for i in range(no_of_processes):
            if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
                ready_process.append(i)
        if len(ready_process) > 0:
            target_process = ready_process[0]
            for i in ready_process:
                if remaining_time[i] < remaining_time[target_process]:
                    target_process = i
            total_time += burst_time[target_process]
            completed += 1
            remaining_time[target_process] = 0
            waiting_time[target_process] = (
                total_time - arrival_time[target_process] - burst_time[target_process]
            )
        else:
            total_time += 1
    return waiting_time


def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    """Turnaround time is the sum of a process's burst time and waiting time."""
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


if __name__ == "__main__":
    print("[TEST CASE 01]")
    no_of_processes = 4
    burst_time = [2, 5, 3, 7]
    arrival_time = [0, 0, 0, 0]
    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    turn_around_time = calculate_turnaroundtime(
        burst_time, no_of_processes, waiting_time
    )
    # Printing the Result
    print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
    for i, process_id in enumerate(list(range(1, 5))):
        print(
            f"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
            f"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
        )
    print(f"\nAverage waiting time = {mean(waiting_time):.5f}")
    print(f"Average turnaround time = {mean(turn_around_time):.5f}")
| 291 | 1 |
import os
from distutils.util import strtobool


def get_int_from_env(env_keys, default):
    """Return the first non-negative integer value found in `env_keys`, else `default`."""
    for e in env_keys:
        val = int(os.environ.get(e, -1))
        if val >= 0:
            return val
    return default


def parse_flag_from_env(key, default=False):
    value = os.environ.get(key, str(default))
    return strtobool(value) == 1  # As its name indicates `strtobool` actually returns an int...


def parse_choice_from_env(key, default="no"):
    value = os.environ.get(key, str(default))
    return value
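
# Usage sketch (added for illustration; the environment-variable names below are
# hypothetical):
#   os.environ["WORLD_SIZE"] = "4"
#   world_size = get_int_from_env(["WORLD_SIZE", "SLURM_NTASKS"], default=1)  # -> 4
#   debug = parse_flag_from_env("MY_DEBUG_FLAG")                              # -> False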
| 702 |
"""simple docstring"""
def __magic_name__ ( _lowerCamelCase : list[int] ):
if not nums: # Makes sure that the list is not empty
raise ValueError("""List is empty""" )
__a : Any = sum(_lowerCamelCase ) / len(_lowerCamelCase ) # Calculate the average
return sum(abs(x - average ) for x in nums ) / len(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
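
# Illustrative check (added; value computed by hand: the mean is 5 and the
# absolute deviations are 3, 1, 1, 3, so the average deviation is 8 / 4 = 2.0):
assert average_absolute_deviation([2, 4, 6, 8]) == 2.0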
| 63 | 0 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]

    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}

    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )

    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )

    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )

    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )

    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )


@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    use_fast: bool = field(default=False, metadata={"help": "Set this flag to use fast tokenization."})
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    if (
        os.path.exists(training_args.output_dir)
        and os.listdir(training_args.output_dir)
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
            " --overwrite_output_dir to overcome."
        )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(
        f"n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1)}, "
        f"16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    train_dataset, eval_dataset, test_ds, label2id = get_tfds(
        train_file=data_args.train_file,
        eval_file=data_args.dev_file,
        test_file=data_args.test_file,
        tokenizer=tokenizer,
        label_column_id=data_args.label_column_id,
        max_seq_length=data_args.max_seq_length,
    )

    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=len(label2id),
        label2id=label2id,
        id2label={id: label for label, id in label2id.items()},
        finetuning_task="text-classification",
        cache_dir=model_args.cache_dir,
    )

    with training_args.strategy.scope():
        model = TFAutoModelForSequenceClassification.from_pretrained(
            model_args.model_name_or_path,
            from_pt=bool(".bin" in model_args.model_name_or_path),
            config=config,
            cache_dir=model_args.cache_dir,
        )

    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": (preds == p.label_ids).mean()}

    # Initialize our Trainer
    trainer = TFTrainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
    )

    # Training
    if training_args.do_train:
        trainer.train()
        trainer.save_model()
        tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key, value in result.items():
                logger.info(f"  {key} = {value}")
                writer.write(f"{key} = {value}\n")
            results.update(result)

    return results
if __name__ == "__main__":
main()
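
# Example invocation (added for illustration; the script and file names below are
# hypothetical placeholders):
#   python run_tf_text_classification.py \
#       --model_name_or_path bert-base-uncased \
#       --train_file train.csv --dev_file dev.csv --label_column_id 0 \
#       --output_dir ./out --do_train --do_eval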
| 699 |
def actual_power(a: int, b: int) -> int:
    """Divide-and-conquer exponentiation: a^b computed via two a^(b//2) calls."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        # int(b / 2) truncates toward zero, so for negative b the recursion in
        # actual_power mirrors the positive case and yields a^|b|; invert it here.
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
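
# Illustrative checks (added; values computed by hand):
assert power(2, 10) == 1024
assert power(-2, -3) == -0.125  # 1 / (-2)^3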
| 699 | 1 |
from __future__ import annotations

demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find the shortest path between `start` and `goal` nodes with breadth-first search."""
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []


def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Find the shortest path distance (number of edges) between `start` and `target`."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]


if __name__ == "__main__":
    print(bfs_shortest_path(demo_graph, "G", "D"))  # returns ['G', 'C', 'A', 'B', 'D']
    print(bfs_shortest_path_distance(demo_graph, "G", "D"))  # returns 4
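
# Note (added): bfs_shortest_path_distance is a standard BFS, so it runs in
# O(V + E) time; bfs_shortest_path stores whole paths on the queue, trading
# extra memory for a path that needs no separate reconstruction step.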
| 320 |
import itertools
import json
import os
import unittest

from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LongformerTokenizer
    test_slow_tokenizer = True
    rust_tokenizer_class = LongformerTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
            "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
            "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def longformer_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=True), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=True),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )
| 320 | 1 |
"""simple docstring"""
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def __magic_name__ ( _lowerCamelCase: int ) -> bool:
'''simple docstring'''
lowerCAmelCase = int(number**0.5 )
return number == sq * sq
def __magic_name__ ( _lowerCamelCase: int, _lowerCamelCase: int, _lowerCamelCase: int, _lowerCamelCase: int, _lowerCamelCase: int, _lowerCamelCase: int ) -> tuple[int, int]:
'''simple docstring'''
lowerCAmelCase = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
lowerCAmelCase = x_den * y_den * z_den
lowerCAmelCase = gcd(_lowerCamelCase, _lowerCamelCase )
top //= hcf
bottom //= hcf
return top, bottom
def __magic_name__ ( _lowerCamelCase: int = 35 ) -> int:
'''simple docstring'''
lowerCAmelCase = set()
lowerCAmelCase = 42
lowerCAmelCase = Fraction(0 )
lowerCAmelCase = 42
for x_num in range(1, order + 1 ):
for x_den in range(x_num + 1, order + 1 ):
for y_num in range(1, order + 1 ):
for y_den in range(y_num + 1, order + 1 ):
# n=1
lowerCAmelCase = x_num * y_den + x_den * y_num
lowerCAmelCase = x_den * y_den
lowerCAmelCase = gcd(_lowerCamelCase, _lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowerCAmelCase = add_three(
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
unique_s.add(_lowerCamelCase )
# n=2
lowerCAmelCase = (
x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
)
lowerCAmelCase = x_den * x_den * y_den * y_den
if is_sq(_lowerCamelCase ) and is_sq(_lowerCamelCase ):
lowerCAmelCase = int(sqrt(_lowerCamelCase ) )
lowerCAmelCase = int(sqrt(_lowerCamelCase ) )
lowerCAmelCase = gcd(_lowerCamelCase, _lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowerCAmelCase = add_three(
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
unique_s.add(_lowerCamelCase )
# n=-1
lowerCAmelCase = x_num * y_num
lowerCAmelCase = x_den * y_num + x_num * y_den
lowerCAmelCase = gcd(_lowerCamelCase, _lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowerCAmelCase = add_three(
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
unique_s.add(_lowerCamelCase )
# n=2
lowerCAmelCase = x_num * x_num * y_num * y_num
lowerCAmelCase = (
x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
)
if is_sq(_lowerCamelCase ) and is_sq(_lowerCamelCase ):
lowerCAmelCase = int(sqrt(_lowerCamelCase ) )
lowerCAmelCase = int(sqrt(_lowerCamelCase ) )
lowerCAmelCase = gcd(_lowerCamelCase, _lowerCamelCase )
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
lowerCAmelCase = add_three(
_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
unique_s.add(_lowerCamelCase )
for num, den in unique_s:
total += Fraction(_lowerCamelCase, _lowerCamelCase )
return total.denominator + total.numerator
if __name__ == "__main__":
print(f"""{solution() = }""")
| 535 |
"""simple docstring"""
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def __magic_name__ ( _lowerCamelCase: Optional[Any] ) -> Dict:
'''simple docstring'''
def wrapper(*_lowerCamelCase: Any, **_lowerCamelCase: Union[str, Any] ):
lowerCAmelCase = timeit.default_timer()
lowerCAmelCase = func(*_lowerCamelCase, **_lowerCamelCase )
lowerCAmelCase = timeit.default_timer() - starttime
return delta
lowerCAmelCase = func.__name__
return wrapper
def __magic_name__ ( _lowerCamelCase: dict, _lowerCamelCase: List[Any]=100, _lowerCamelCase: int=None ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase = []
lowerCAmelCase = seq_shapes or {}
for i in range(_lowerCamelCase ):
lowerCAmelCase = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(_lowerCamelCase, _ArrayXD ):
lowerCAmelCase = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(_lowerCamelCase, datasets.Value ):
if v.dtype == "string":
lowerCAmelCase = '''The small grey turtle was surprisingly fast when challenged.'''
else:
lowerCAmelCase = np.random.randint(10, size=1 ).astype(v.dtype ).item()
elif isinstance(_lowerCamelCase, datasets.Sequence ):
while isinstance(_lowerCamelCase, datasets.Sequence ):
lowerCAmelCase = v.feature
lowerCAmelCase = seq_shapes[k]
lowerCAmelCase = np.random.rand(*_lowerCamelCase ).astype(v.dtype )
lowerCAmelCase = data
dummy_data.append((i, example) )
return dummy_data
def __magic_name__ ( _lowerCamelCase: Tuple, _lowerCamelCase: Tuple, _lowerCamelCase: Union[str, Any]=100, _lowerCamelCase: List[Any]=None ) -> List[Any]:
'''simple docstring'''
lowerCAmelCase = generate_examples(_lowerCamelCase, num_examples=_lowerCamelCase, seq_shapes=_lowerCamelCase )
with ArrowWriter(features=_lowerCamelCase, path=_lowerCamelCase ) as writer:
for key, record in dummy_data:
lowerCAmelCase = features.encode_example(_lowerCamelCase )
writer.write(_lowerCamelCase )
lowerCAmelCase , lowerCAmelCase = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
F"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" )
lowerCAmelCase = datasets.Dataset.from_file(filename=_lowerCamelCase, info=datasets.DatasetInfo(features=_lowerCamelCase ) )
return dataset
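
# Usage sketch (added for illustration; the feature spec and path are hypothetical):
#   features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})
#   ds = generate_example_dataset("/tmp/dummy.arrow", features, num_examples=10)
#   timed_pass = get_duration(lambda: list(ds))
#   print(timed_pass())  # seconds for one full pass over the dummy dataset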
| 535 | 1 |
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class ViltProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length,
            verbose=verbose, return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)

        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
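
# Usage sketch (added for illustration; the checkpoint name and image path are
# assumptions, not taken from the snippet above):
#   from PIL import Image
#   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#   inputs = processor(images=Image.open("cat.png"), text="How many cats?", return_tensors="pt")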
| 606 |
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape the first page of Amazon search results for `product` into a DataFrame."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
        (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        product_price = " "
        product_mrp = " "
    data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
| 606 | 1 |
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/wav2vec2-base-960h": "https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json",
    # See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}


class Wav2Vec2Config(PretrainedConfig):
    model_type = "wav2vec2"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        feat_quantizer_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(512, 512, 512, 512, 512, 512, 512),
        conv_stride=(5, 2, 2, 2, 2, 2, 2),
        conv_kernel=(10, 3, 3, 3, 3, 2, 2),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        do_stable_layer_norm=False,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        num_codevectors_per_group=320,
        num_codevector_groups=2,
        contrastive_logits_temperature=0.1,
        num_negatives=100,
        codevector_dim=256,
        proj_codevector_dim=256,
        diversity_loss_weight=0.1,
        ctc_loss_reduction="sum",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        tdnn_dim=(512, 512, 512, 512, 1_500),
        tdnn_kernel=(5, 3, 3, 1, 1),
        tdnn_dilation=(1, 2, 3, 1, 1),
        xvector_output_dim=512,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        add_adapter=False,
        adapter_kernel_size=3,
        adapter_stride=2,
        num_adapter_layers=3,
        output_hidden_size=None,
        adapter_attn_dim=None,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
        self.do_stable_layer_norm = do_stable_layer_norm
        self.use_weighted_layer_sum = use_weighted_layer_sum

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
                " `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
                f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
                f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # parameters for pretraining with codevector quantized representations
        self.num_codevectors_per_group = num_codevectors_per_group
        self.num_codevector_groups = num_codevector_groups
        self.contrastive_logits_temperature = contrastive_logits_temperature
        self.feat_quantizer_dropout = feat_quantizer_dropout
        self.num_negatives = num_negatives
        self.codevector_dim = codevector_dim
        self.proj_codevector_dim = proj_codevector_dim
        self.diversity_loss_weight = diversity_loss_weight

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # adapter
        self.add_adapter = add_adapter
        self.adapter_kernel_size = adapter_kernel_size
        self.adapter_stride = adapter_stride
        self.num_adapter_layers = num_adapter_layers
        self.output_hidden_size = output_hidden_size or hidden_size
        self.adapter_attn_dim = adapter_attn_dim

        # SequenceClassification-specific parameter. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size

        # XVector-specific parameters. Feel free to ignore for other classes.
        self.tdnn_dim = list(tdnn_dim)
        self.tdnn_kernel = list(tdnn_kernel)
        self.tdnn_dilation = list(tdnn_dilation)
        self.xvector_output_dim = xvector_output_dim

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
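
# Usage sketch (added for illustration):
#   config = Wav2Vec2Config()
#   config.inputs_to_logits_ratio  # -> 320: the conv strides multiply to
#   5*2*2*2*2*2*2 = 320, i.e. one logit per 320 waveform samples (20 ms at 16 kHz)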
| 501 |
from typing import Callable, List, Optional, Union

import PIL
import torch
from transformers import (
    CLIPImageProcessor,
    CLIPSegForImageSegmentation,
    CLIPSegProcessor,
    CLIPTextModel,
    CLIPTokenizer,
)

from diffusers import DiffusionPipeline
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import deprecate, is_accelerate_available, logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class TextInpainting(DiffusionPipeline):
    def __init__(
        self,
        segmentation_model: CLIPSegForImageSegmentation,
        segmentation_processor: CLIPSegProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "skip_prk_steps") and scheduler.config.skip_prk_steps is False:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration"
                " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
                " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
                " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
                " Hub, it would be very nice if you could open a Pull request for the"
                " `scheduler/scheduler_config.json` file"
            )
            deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["skip_prk_steps"] = True
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            segmentation_model=segmentation_model,
            segmentation_processor=segmentation_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)

    def enable_sequential_cpu_offload(self):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device("cuda")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[torch.FloatTensor, PIL.Image.Image],
        text: str,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        # Use CLIPSeg to segment the region described by `text` and build an inpainting mask.
        inputs = self.segmentation_processor(
            text=[text], images=[image], padding="max_length", return_tensors="pt"
        ).to(self.device)
        outputs = self.segmentation_model(**inputs)
        mask = torch.sigmoid(outputs.logits).cpu().detach().unsqueeze(-1).numpy()
        mask_pil = self.numpy_to_pil(mask)[0].resize(image.size)

        # Run inpainting pipeline with the generated mask
        inpainting_pipeline = StableDiffusionInpaintPipeline(
            vae=self.vae,
            text_encoder=self.text_encoder,
            tokenizer=self.tokenizer,
            unet=self.unet,
            scheduler=self.scheduler,
            safety_checker=self.safety_checker,
            feature_extractor=self.feature_extractor,
        )
        return inpainting_pipeline(
            prompt=prompt,
            image=image,
            mask_image=mask_pil,
            height=height,
            width=width,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
            negative_prompt=negative_prompt,
            num_images_per_prompt=num_images_per_prompt,
            eta=eta,
            generator=generator,
            latents=latents,
            output_type=output_type,
            return_dict=return_dict,
            callback=callback,
            callback_steps=callback_steps,
        )
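
# Usage sketch (added for illustration; the model ids and image variable are
# assumptions, not taken from the snippet above):
#   from transformers import CLIPSegProcessor, CLIPSegForImageSegmentation
#   seg_processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#   seg_model = CLIPSegForImageSegmentation.from_pretrained("CIDAS/clipseg-rd64-refined")
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-inpainting",
#       custom_pipeline="text_inpainting",
#       segmentation_model=seg_model,
#       segmentation_processor=seg_processor,
#   )
#   result = pipe(prompt="a red sofa", image=init_image, text="the couch").images[0]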
| 501 | 1 |
def is_int_palindrome(num: int) -> bool:
    """Return True if `num` reads the same when its digits are reversed."""
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 191 |
from ...utils import logging
from ..t5.modeling_tf_t5 import TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model
from .configuration_mt5 import MT5Config


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"


class TFMT5Model(TFT5Model):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5ForConditionalGeneration(TFT5ForConditionalGeneration):
    model_type = "mt5"
    config_class = MT5Config


class TFMT5EncoderModel(TFT5EncoderModel):
    model_type = "mt5"
    config_class = MT5Config
| 191 | 1 |
from transformers import BertTokenizerFast

from .custom_tokenization import CustomTokenizer


class CustomTokenizerFast(BertTokenizerFast):
    slow_tokenizer_class = CustomTokenizer
    pass
| 548 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
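
# Usage sketch (hedged; the model id and scheduler choice are illustrative):
#   from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
#   pipe = DiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
#   pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)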
| 369 | 0 |
def validate_initial_digits(credit_card_number: str) -> bool:
    """Check that the number starts with a known issuer prefix."""
    return credit_card_number.startswith(("34", "35", "37", "4", "5", "6"))
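

# Quick sanity checks (the sample numbers are ours): a Visa-style "4..." prefix
# passes, while an unknown "7..." prefix does not.
assert validate_initial_digits("4111111111111111")
assert not validate_initial_digits("7111111111111111")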


def luhn_validation(credit_card_number: str) -> bool:
    """Run the Luhn checksum over ``credit_card_number``."""
    cc_number = credit_card_number
    total = 0
    half_len = len(cc_number) - 2
    for i in range(half_len, -1, -2):
        # double the value of every second digit
        digit = int(cc_number[i])
        digit *= 2
        # If doubling of a number results in a two-digit number,
        # i.e. greater than 9 (e.g., 6 × 2 = 12),
        # then add the digits of the product (e.g., 12: 1 + 2 = 3, 15: 1 + 5 = 6)
        # to get a single-digit number.
        if digit > 9:
            digit %= 10
            digit += 1
        cc_number = cc_number[:i] + str(digit) + cc_number[i + 1 :]
        total += digit
    # Sum up the remaining digits
    for i in range(len(cc_number) - 1, -1, -2):
        total += int(cc_number[i])
    return total % 10 == 0
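
# Worked example (ours): for "4111111111111111" the doubled digits contribute
# 8 (from the leading 4) plus seven 2s, and the untouched digits eight 1s:
# 8 + 14 + 8 = 30, and 30 % 10 == 0, so the Luhn check passes.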


def validate_credit_card_number(credit_card_number: str) -> bool:
    """Print a diagnostic message and return whether the number is valid."""
    error_message = f"{credit_card_number} is an invalid credit card number because"
    if not credit_card_number.isdigit():
        print(f"{error_message} it has nonnumerical characters.")
        return False
    if not 13 <= len(credit_card_number) <= 16:
        print(f"{error_message} of its length.")
        return False
    if not validate_initial_digits(credit_card_number):
        print(f"{error_message} of its first two digits.")
        return False
    if not luhn_validation(credit_card_number):
        print(f"{error_message} it fails the Luhn check.")
        return False
    print(f"{credit_card_number} is a valid credit card number.")
    return True
if __name__ == "__main__":
import doctest
doctest.testmod()
validate_credit_card_number('4111111111111111')
validate_credit_card_number('32323')
| 159 |
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)

        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
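
# Usage sketch (ours): the test mixin below drives this tester roughly as —
#   config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
#   model = FlaxRobertaModel(config)
#   outputs = model(**inputs_dict)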

@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 159 | 1 |