Dataset schema (five fields per record):

  code                      string  (lengths 82 – 54.1k)
  code_codestyle            int64   (0 – 699)
  style_context             string  (lengths 111 – 35.6k)
  style_context_codestyle   int64   (0 – 699)
  label                     int64   (0 – 1)
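A minimal sketch of loading and iterating records with this schema via the Hugging Face `datasets` library; the dataset identifier "user/code-style-pairs" below is a placeholder, not the real path:

```python
from datasets import load_dataset

# Placeholder identifier -- substitute the actual dataset path.
ds = load_dataset("user/code-style-pairs", split="train")

row = ds[0]
# Integer style ids for the two code cells, plus the binary pair label.
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # code cells can run to tens of kilobytes
```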
import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def UpperCamelCase ( snake_case__ : List[Any] ) -> Optional[Any]: if is_torch_version('<' , '2.0.0' ) or not hasattr(snake_case__ , '_dynamo' ): return False return isinstance(snake_case__ , torch._dynamo.eval_frame.OptimizedModule ) def UpperCamelCase ( snake_case__ : str , snake_case__ : bool = True ) -> Tuple: UpperCamelCase : Tuple = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) UpperCamelCase : Optional[Any] = is_compiled_module(snake_case__ ) if is_compiled: UpperCamelCase : List[str] = model UpperCamelCase : Tuple = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(snake_case__ , snake_case__ ): UpperCamelCase : List[str] = model.module if not keep_fpaa_wrapper: UpperCamelCase : Optional[int] = getattr(snake_case__ , 'forward' ) UpperCamelCase : int = model.__dict__.pop('_original_forward' , snake_case__ ) if original_forward is not None: while hasattr(snake_case__ , '__wrapped__' ): UpperCamelCase : Any = forward.__wrapped__ if forward == original_forward: break UpperCamelCase : Any = forward if getattr(snake_case__ , '_converted_to_transformer_engine' , snake_case__ ): convert_model(snake_case__ , to_transformer_engine=snake_case__ ) if is_compiled: UpperCamelCase : Tuple = model UpperCamelCase : int = compiled_model return model def UpperCamelCase ( ) -> Any: PartialState().wait_for_everyone() def UpperCamelCase ( snake_case__ : Optional[Any] , snake_case__ : Dict ) -> int: if PartialState().distributed_type == DistributedType.TPU: xm.save(snake_case__ , snake_case__ ) elif PartialState().local_process_index == 0: torch.save(snake_case__ , snake_case__ ) @contextmanager def UpperCamelCase ( **snake_case__ : str ) -> str: for key, value in kwargs.items(): UpperCamelCase : int = str(snake_case__ ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def UpperCamelCase ( snake_case__ : str ) -> Any: if not hasattr(snake_case__ , '__qualname__' ) and not hasattr(snake_case__ , '__name__' ): UpperCamelCase : Tuple = getattr(snake_case__ , '__class__' , snake_case__ ) if hasattr(snake_case__ , '__qualname__' ): return obj.__qualname__ if hasattr(snake_case__ , '__name__' ): return obj.__name__ return str(snake_case__ ) def UpperCamelCase ( snake_case__ : Any , snake_case__ : Optional[Any] ) -> Tuple: for key, value in source.items(): if isinstance(snake_case__ , snake_case__ ): UpperCamelCase : Optional[Any] = destination.setdefault(snake_case__ , {} ) merge_dicts(snake_case__ , snake_case__ ) else: UpperCamelCase : List[Any] = value return destination def UpperCamelCase ( snake_case__ : int = None ) -> bool: if port is None: UpperCamelCase : Union[str, Any] = 29500 with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s: return s.connect_ex(('localhost', port) ) == 0
code_codestyle: 40
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : str =logging.get_logger(__name__) __lowerCAmelCase : Any ={ # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = '''megatron-bert''' def __init__( self :int , lowerCAmelCase__ :int=29_056 , lowerCAmelCase__ :Dict=1_024 , lowerCAmelCase__ :Optional[int]=24 , lowerCAmelCase__ :str=16 , lowerCAmelCase__ :Optional[int]=4_096 , lowerCAmelCase__ :Optional[Any]="gelu" , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :List[str]=512 , lowerCAmelCase__ :Any=2 , lowerCAmelCase__ :int=0.02 , lowerCAmelCase__ :Tuple=1E-1_2 , lowerCAmelCase__ :Tuple=0 , lowerCAmelCase__ :Optional[int]="absolute" , lowerCAmelCase__ :List[str]=True , **lowerCAmelCase__ :Tuple , ) -> Optional[Any]: super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = vocab_size __SCREAMING_SNAKE_CASE : List[str] = hidden_size __SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers __SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads __SCREAMING_SNAKE_CASE : Tuple = hidden_act __SCREAMING_SNAKE_CASE : Any = intermediate_size __SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob __SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings __SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size __SCREAMING_SNAKE_CASE : str = initializer_range __SCREAMING_SNAKE_CASE : Dict = layer_norm_eps __SCREAMING_SNAKE_CASE : Dict = position_embedding_type __SCREAMING_SNAKE_CASE : Union[str, Any] = use_cache
style_context_codestyle: 696
label: 0
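A small usage sketch for the port probe at the end of the `code` cell above; the name `is_port_in_use` is reconstructed, since the cell's identifiers are mangled, and 29500 is PyTorch distributed's conventional default master port:

```python
import socket


def is_port_in_use(port: int = 29500) -> bool:
    # connect_ex returns 0 when a listener already occupies the port.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        return s.connect_ex(("localhost", port)) == 0


if __name__ == "__main__":
    print(is_port_in_use())  # True if something is listening on 29500
```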
'''simple docstring''' import sacrebleu as scb from packaging import version from sacrebleu import CHRF import datasets lowerCAmelCase__ = '''\ @inproceedings{popovic-2015-chrf, title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation", author = "Popovi{\'c}, Maja", booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation", month = sep, year = "2015", address = "Lisbon, Portugal", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/W15-3049", doi = "10.18653/v1/W15-3049", pages = "392--395", } @inproceedings{popovic-2017-chrf, title = "chr{F}++: words helping character n-grams", author = "Popovi{\'c}, Maja", booktitle = "Proceedings of the Second Conference on Machine Translation", month = sep, year = "2017", address = "Copenhagen, Denmark", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/W17-4770", doi = "10.18653/v1/W17-4770", pages = "612--618", } @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' lowerCAmelCase__ = '''\ ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches, and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation that is already present in sacrebleu. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information. ''' lowerCAmelCase__ = ''' Produces ChrF(++) scores for hypotheses given reference translations. Args: predictions (list of str): The predicted sentences. references (list of list of str): The references. There should be one reference sub-list for each prediction sentence. char_order (int): Character n-gram order. Defaults to `6`. word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`. beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`. lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`. whitespace (bool): If `True`, include whitespaces when extracting character n-grams. eps_smoothing (bool): If `True`, applies epsilon smoothing similar to reference chrF++.py, NLTK and Moses implementations. If `False`, it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`. Returns: \'score\' (float): The chrF (chrF++) score, \'char_order\' (int): The character n-gram order, \'word_order\' (int): The word n-gram order. 
If equals to 2, the metric is referred to as chrF++, \'beta\' (int): Determine the importance of recall w.r.t precision Examples: Example 1--a simple example of calculating chrF: >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, references=reference) >>> print(results) {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2} Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF: >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2) >>> print(results) {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2} Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case: >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2, ... 
lowercase=True) >>> print(results) {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowercase_ (datasets.Metric ): """simple docstring""" def SCREAMING_SNAKE_CASE ( self : List[Any] ): if version.parse(scb.__version__ ) < version.parse('''1.4.12''' ): raise ImportWarning( '''To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn\'t match this condition.\n''' '''You can install it with `pip install "sacrebleu>=1.4.12"`.''' ) return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,homepage='''https://github.com/mjpost/sacreBLEU#chrf--chrf''' ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { '''predictions''': datasets.Value('''string''' ,id='''sequence''' ), '''references''': datasets.Sequence(datasets.Value('''string''' ,id='''sequence''' ) ,id='''references''' ), } ) ,codebase_urls=['''https://github.com/mjpost/sacreBLEU#chrf--chrf'''] ,reference_urls=[ '''https://github.com/m-popovic/chrF''', ] ,) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ,lowercase__ : str ,lowercase__ : List[Any] ,lowercase__ : int = CHRF.CHAR_ORDER ,lowercase__ : int = CHRF.WORD_ORDER ,lowercase__ : int = CHRF.BETA ,lowercase__ : bool = False ,lowercase__ : bool = False ,lowercase__ : bool = False ,): __lowercase = len(references[0] ) if any(len(lowercase__ ) != references_per_prediction for refs in references ): raise ValueError('''Sacrebleu requires the same number of references for each prediction''' ) __lowercase = [[refs[i] for refs in references] for i in range(lowercase__ )] __lowercase = CHRF(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) __lowercase = sb_chrf.corpus_score(lowercase__ ,lowercase__ ) return { "score": output.score, "char_order": output.char_order, "word_order": output.word_order, "beta": output.beta, }
code_codestyle: 41
import os import sys import unittest __lowerCAmelCase : List[Any] =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __lowerCAmelCase : Optional[Any] =os.path.join(git_repo_path, 'src', 'transformers') __lowerCAmelCase : Optional[Any] ='\n{0} = None\n' __lowerCAmelCase : Tuple ='\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n' __lowerCAmelCase : Dict ='\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n' class _lowercase ( unittest.TestCase ): '''simple docstring''' def __magic_name__( self :Tuple ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE : str = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''' ) self.assertIsNone(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Dict = find_backend(''' if not is_tokenizers_available():''' ) self.assertEqual(lowerCAmelCase__ , '''tokenizers''' ) __SCREAMING_SNAKE_CASE : Dict = find_backend(''' if not is_tensorflow_text_available():''' ) self.assertEqual(lowerCAmelCase__ , '''tensorflow_text''' ) __SCREAMING_SNAKE_CASE : Tuple = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' ) self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tokenizers''' ) __SCREAMING_SNAKE_CASE : Any = find_backend( ''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' ) self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tensorflow_text''' ) __SCREAMING_SNAKE_CASE : List[str] = find_backend( ''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' ) self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tokenizers_and_vision''' ) def __magic_name__( self :List[str] ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Union[str, Any] = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('''torch''' , lowerCAmelCase__ ) self.assertIn('''tensorflow_text''' , lowerCAmelCase__ ) self.assertIn('''sentencepiece_and_tokenizers''' , lowerCAmelCase__ ) # Likewise, we can't assert on the exact content of a key self.assertIn('''BertModel''' , objects['''torch'''] ) self.assertIn('''TFBertModel''' , objects['''tf'''] ) self.assertIn('''FlaxBertModel''' , objects['''flax'''] ) self.assertIn('''BertModel''' , objects['''torch'''] ) self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] ) self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] ) def __magic_name__( self :Optional[Any] ) -> List[Any]: __SCREAMING_SNAKE_CASE : List[Any] = create_dummy_object('''CONSTANT''' , '''\'torch\'''' ) self.assertEqual(lowerCAmelCase__ , '''\nCONSTANT = None\n''' ) __SCREAMING_SNAKE_CASE : List[str] = create_dummy_object('''function''' , '''\'torch\'''' ) self.assertEqual( lowerCAmelCase__ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' ) __SCREAMING_SNAKE_CASE : int = ''' class FakeClass(metaclass=DummyObject): _backends = \'torch\' def __init__(self, *args, **kwargs): requires_backends(self, \'torch\') ''' __SCREAMING_SNAKE_CASE : Optional[Any] = create_dummy_object('''FakeClass''' , '''\'torch\'''' ) self.assertEqual(lowerCAmelCase__ , 
lowerCAmelCase__ ) def __magic_name__( self :Optional[Any] ) -> Any: __SCREAMING_SNAKE_CASE : str = '''# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, ["torch"]) class FakeClass(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ''' __SCREAMING_SNAKE_CASE : List[Any] = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} ) self.assertEqual(dummy_files['''torch'''] , lowerCAmelCase__ )
style_context_codestyle: 696
label: 0
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def UpperCamelCase( self ) -> Dict: '''simple docstring''' lowerCamelCase_ = tempfile.mkdtemp() lowerCamelCase_ = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '的', '价', '格', '是', '15', '便', 'alex', '##andra', ',', '。', '-', 't', 'shirt', ] lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) lowerCamelCase_ = { 'do_resize': True, 'size': {'height': 224, 'width': 224}, 'do_center_crop': True, 'crop_size': {'height': 18, 'width': 18}, 'do_normalize': True, 'image_mean': [0.48_145_466, 0.4_578_275, 0.40_821_073], 'image_std': [0.26_862_954, 0.26_130_258, 0.27_577_711], 'do_convert_rgb': True, } lowerCamelCase_ = os.path.join(self.tmpdirname , SCREAMING_SNAKE_CASE_ ) with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp: json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: '''simple docstring''' return BertTokenizerFast.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self , **SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]: '''simple docstring''' return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> List[str]: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def UpperCamelCase( self ) -> Optional[Any]: '''simple docstring''' lowerCamelCase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowerCamelCase_ = [Image.fromarray(np.moveaxis(SCREAMING_SNAKE_CASE_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def UpperCamelCase( self ) -> Tuple: '''simple docstring''' lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = self.get_rust_tokenizer() lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) processor_slow.save_pretrained(self.tmpdirname ) lowerCamelCase_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) processor_fast.save_pretrained(self.tmpdirname ) lowerCamelCase_ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(processor_fast.tokenizer , SCREAMING_SNAKE_CASE_ ) 
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(processor_fast.image_processor , SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> List[str]: '''simple docstring''' lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowerCamelCase_ = self.get_tokenizer(cls_token='(CLS)' , sep_token='(SEP)' ) lowerCamelCase_ = self.get_image_processor(do_normalize=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token='(CLS)' , sep_token='(SEP)' , do_normalize=SCREAMING_SNAKE_CASE_ ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , SCREAMING_SNAKE_CASE_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> Dict: '''simple docstring''' lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = self.prepare_image_inputs() lowerCamelCase_ = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='np' ) lowerCamelCase_ = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def UpperCamelCase( self ) -> Tuple: '''simple docstring''' lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = 'Alexandra,T-shirt的价格是15便士。' lowerCamelCase_ = processor(text=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = tokenizer(SCREAMING_SNAKE_CASE_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCamelCase( self ) -> Tuple: '''simple docstring''' lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = 'Alexandra,T-shirt的价格是15便士。' lowerCamelCase_ = self.prepare_image_inputs() lowerCamelCase_ = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ ) self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(SCREAMING_SNAKE_CASE_ ): processor() def UpperCamelCase( self ) -> str: '''simple docstring''' lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowerCamelCase_ = processor.batch_decode(SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE_ ) self.assertListEqual(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) def UpperCamelCase( self ) -> List[str]: 
'''simple docstring''' lowerCamelCase_ = self.get_image_processor() lowerCamelCase_ = self.get_tokenizer() lowerCamelCase_ = ChineseCLIPProcessor(tokenizer=SCREAMING_SNAKE_CASE_ , image_processor=SCREAMING_SNAKE_CASE_ ) lowerCamelCase_ = 'Alexandra,T-shirt的价格是15便士。' lowerCamelCase_ = self.prepare_image_inputs() lowerCamelCase_ = processor(text=SCREAMING_SNAKE_CASE_ , images=SCREAMING_SNAKE_CASE_ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
code_codestyle: 42
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    # Gamma(num) = integral from 0 to infinity of x**(num - 1) * exp(-x) dx
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
style_context_codestyle: 696
label: 0
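A quick sanity check of the repaired gamma integral in the `style_context` cell above, using the identity Γ(n) = (n − 1)!; `math.inf` stands in for `numpy.inf` so only SciPy is required:

```python
import math

from scipy.integrate import quad


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)


# Gamma(5) = 4! = 24, up to quadrature error.
value = quad(integrand, 0, math.inf, args=(5,))[0]
assert abs(value - 24.0) < 1e-6
print(value)
```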
from argparse import ArgumentParser from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand lowerCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" if not path: return "pipe" for ext in PipelineDataFormat.SUPPORTED_FORMATS: if path.endswith(SCREAMING_SNAKE_CASE ): return ext raise Exception( f'Unable to determine file format from file extension {path}. ' f'Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}' ) def _a ( SCREAMING_SNAKE_CASE ): """simple docstring""" lowercase__ = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) lowercase__ = try_infer_format_from_ext(args.input ) if args.format == '''infer''' else args.format lowercase__ = PipelineDataFormat.from_str( format=SCREAMING_SNAKE_CASE , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , ) return RunCommand(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) class _a ( UpperCamelCase__ ): def __init__( self: Dict , UpperCamelCase_: Pipeline , UpperCamelCase_: PipelineDataFormat ) -> Tuple: """simple docstring""" lowercase__ = nlp lowercase__ = reader @staticmethod def lowerCamelCase_ ( UpperCamelCase_: ArgumentParser ) -> Dict: """simple docstring""" lowercase__ = parser.add_parser('''run''' , help='''Run a pipeline through the CLI''' ) run_parser.add_argument('''--task''' , choices=get_supported_tasks() , help='''Task to run''' ) run_parser.add_argument('''--input''' , type=UpperCamelCase_ , help='''Path to the file to use for inference''' ) run_parser.add_argument('''--output''' , type=UpperCamelCase_ , help='''Path to the file that will be used post to write results.''' ) run_parser.add_argument('''--model''' , type=UpperCamelCase_ , help='''Name or path to the model to instantiate.''' ) run_parser.add_argument('''--config''' , type=UpperCamelCase_ , help='''Name or path to the model\'s config to instantiate.''' ) run_parser.add_argument( '''--tokenizer''' , type=UpperCamelCase_ , help='''Name of the tokenizer to use. (default: same as the model name)''' ) run_parser.add_argument( '''--column''' , type=UpperCamelCase_ , help='''Name of the column to use as input. 
(For multi columns input as QA use column1,columns2)''' , ) run_parser.add_argument( '''--format''' , type=UpperCamelCase_ , default='''infer''' , choices=PipelineDataFormat.SUPPORTED_FORMATS , help='''Input format to read from''' , ) run_parser.add_argument( '''--device''' , type=UpperCamelCase_ , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , ) run_parser.add_argument('''--overwrite''' , action='''store_true''' , help='''Allow overwriting the output file.''' ) run_parser.set_defaults(func=UpperCamelCase_ ) def lowerCamelCase_ ( self: int ) -> Optional[int]: """simple docstring""" lowercase__ , lowercase__ = self._nlp, [] for entry in self._reader: lowercase__ = nlp(**UpperCamelCase_ ) if self._reader.is_multi_columns else nlp(UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ): outputs.append(UpperCamelCase_ ) else: outputs += output # Saving data if self._nlp.binary_output: lowercase__ = self._reader.save_binary(UpperCamelCase_ ) logger.warning(f'Current pipeline requires output to be in binary format, saving at {binary_path}' ) else: self._reader.save(UpperCamelCase_ )
code_codestyle: 43
def equated_monthly_installments(
    principal: float, rate_per_annum: float, years_to_repay: int
) -> float:
    # Amortization formula: A = p * r * (1 + r)**n / ((1 + r)**n - 1),
    # where r is the monthly rate and n the number of monthly payments.
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 696
label: 0
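A worked check of the repaired EMI function in the `style_context` cell above: 25,000 borrowed at 12% per annum over 3 years means a monthly rate of 0.01 across 36 payments, so EMI = 25000 · 0.01 · 1.01^36 / (1.01^36 − 1) ≈ 830.36 (computed from the formula, not taken from the record):

```python
principal, rate_per_annum, years = 25_000, 0.12, 3
r = rate_per_annum / 12  # 0.01 per month
n = years * 12           # 36 monthly payments
emi = principal * r * (1 + r) ** n / ((1 + r) ** n - 1)
print(round(emi, 2))     # ~830.36
```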
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    # Euclidean distance between two equal-length vectors.
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    # For every query vector, return the nearest dataset vector and its distance.
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    # Cosine similarity of two vectors.
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 44
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(number: int) -> int:
    # Mobius function: 1 for a square-free number with an even number of
    # prime factors, -1 for an odd number, and 0 if it is not square-free.
    factors = prime_factors(number)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 696
label: 0
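Spot-checks for the repaired Möbius function in the `style_context` cell above, with a trial-division `prime_factors` stand-in so the snippet is self-contained (μ(1) = 1; μ(10) = 1 since 10 = 2·5; μ(12) = 0 since 12 = 2²·3 is not square-free; μ(30) = −1 with three prime factors):

```python
def prime_factors(n: int) -> list[int]:
    # Trial-division factorization; repeated factors are kept.
    factors, d = [], 2
    while d * d <= n:
        while n % d == 0:
            factors.append(d)
            n //= d
        d += 1
    if n > 1:
        factors.append(n)
    return factors


def mobius(number: int) -> int:
    factors = prime_factors(number)
    if len(set(factors)) == len(factors):  # square-free
        return -1 if len(factors) % 2 else 1
    return 0


assert [mobius(k) for k in (1, 10, 12, 30)] == [1, 1, 0, -1]
```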
import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class lowerCAmelCase_ ( lowercase ): """simple docstring""" def __init__( self :int , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :UNetaDModel , lowerCamelCase__ :DDPMScheduler , lowerCamelCase__ :List[Any] , ): super().__init__() UpperCamelCase__ :Tuple = value_function UpperCamelCase__ :Optional[int] = unet UpperCamelCase__ :List[str] = scheduler UpperCamelCase__ :Dict = env UpperCamelCase__ :Dict = env.get_dataset() UpperCamelCase__ :Union[str, Any] = {} for key in self.data.keys(): try: UpperCamelCase__ :int = self.data[key].mean() except: # noqa: E722 pass UpperCamelCase__ :Any = {} for key in self.data.keys(): try: UpperCamelCase__ :int = self.data[key].std() except: # noqa: E722 pass UpperCamelCase__ :List[Any] = env.observation_space.shape[0] UpperCamelCase__ :List[str] = env.action_space.shape[0] def __a ( self :Union[str, Any] , lowerCamelCase__ :List[str] , lowerCamelCase__ :str ): return (x_in - self.means[key]) / self.stds[key] def __a ( self :int , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ): return x_in * self.stds[key] + self.means[key] def __a ( self :Any , lowerCamelCase__ :int ): if type(lowerCamelCase__ ) is dict: return {k: self.to_torch(lowerCamelCase__ ) for k, v in x_in.items()} elif torch.is_tensor(lowerCamelCase__ ): return x_in.to(self.unet.device ) return torch.tensor(lowerCamelCase__ , device=self.unet.device ) def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :Optional[Any] , lowerCamelCase__ :Tuple ): for key, val in cond.items(): UpperCamelCase__ :str = val.clone() return x_in def __a ( self :Union[str, Any] , lowerCamelCase__ :List[Any] , lowerCamelCase__ :int , lowerCamelCase__ :Optional[int] , lowerCamelCase__ :Optional[int] ): UpperCamelCase__ :Any = x.shape[0] UpperCamelCase__ :List[Any] = None for i in tqdm.tqdm(self.scheduler.timesteps ): # create batch of timesteps to pass into model UpperCamelCase__ :Optional[Any] = torch.full((batch_size,) , lowerCamelCase__ , device=self.unet.device , dtype=torch.long ) for _ in range(lowerCamelCase__ ): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models UpperCamelCase__ :Dict = self.value_function(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample UpperCamelCase__ :List[Any] = torch.autograd.grad([y.sum()] , [x] )[0] UpperCamelCase__ :Union[str, Any] = self.scheduler._get_variance(lowerCamelCase__ ) UpperCamelCase__ :Any = torch.exp(0.5 * posterior_variance ) UpperCamelCase__ :Dict = model_std * grad UpperCamelCase__ :Optional[Any] = 0 UpperCamelCase__ :Dict = x.detach() UpperCamelCase__ :int = x + scale * grad UpperCamelCase__ :int = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim ) UpperCamelCase__ :List[str] = self.unet(x.permute(0 , 2 , 1 ) , lowerCamelCase__ ).sample.permute(0 , 2 , 1 ) # TODO: verify deprecation of this kwarg UpperCamelCase__ :List[str] = self.scheduler.step(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , predict_epsilon=lowerCamelCase__ )["""prev_sample"""] # apply conditions to the trajectory (set the initial state) UpperCamelCase__ :Optional[Any] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim ) UpperCamelCase__ :Optional[int] = self.to_torch(lowerCamelCase__ ) return x, y def __call__( self :Optional[Any] , lowerCamelCase__ 
:Optional[int] , lowerCamelCase__ :str=64 , lowerCamelCase__ :Tuple=32 , lowerCamelCase__ :Dict=2 , lowerCamelCase__ :str=0.1 ): # normalize the observations and create batch dimension UpperCamelCase__ :List[str] = self.normalize(lowerCamelCase__ , """observations""" ) UpperCamelCase__ :List[str] = obs[None].repeat(lowerCamelCase__ , axis=0 ) UpperCamelCase__ :int = {0: self.to_torch(lowerCamelCase__ )} UpperCamelCase__ :Dict = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the trajectories start at current state) UpperCamelCase__ :Any = randn_tensor(lowerCamelCase__ , device=self.unet.device ) UpperCamelCase__ :Optional[int] = self.reset_xa(lowerCamelCase__ , lowerCamelCase__ , self.action_dim ) UpperCamelCase__ :List[Any] = self.to_torch(lowerCamelCase__ ) # run the diffusion process UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = self.run_diffusion(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) # sort output trajectories by value UpperCamelCase__ :List[Any] = y.argsort(0 , descending=lowerCamelCase__ ).squeeze() UpperCamelCase__ :Dict = x[sorted_idx] UpperCamelCase__ :Tuple = sorted_values[:, :, : self.action_dim] UpperCamelCase__ :Optional[Any] = actions.detach().cpu().numpy() UpperCamelCase__ :Optional[int] = self.de_normalize(lowerCamelCase__ , key="""actions""" ) # select the action with the highest value if y is not None: UpperCamelCase__ :List[str] = 0 else: # if we didn't run value guiding, select a random action UpperCamelCase__ :Dict = np.random.randint(0 , lowerCamelCase__ ) UpperCamelCase__ :Tuple = denorm_actions[selected_index, 0] return denorm_actions
code_codestyle: 45
import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPTaConfig from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 __lowerCAmelCase : int ={ 'return_dict': False, 'output_hidden_states': True, 'output_attentions': True, 'torchscript': True, 'torch_dtype': 'float16', 'use_bfloat16': True, 'tf_legacy_loss': True, 'pruned_heads': {'a': 1}, 'tie_word_embeddings': False, 'is_decoder': True, 'cross_attention_hidden_size': 1_2_8, 'add_cross_attention': True, 'tie_encoder_decoder': True, 'max_length': 5_0, 'min_length': 3, 'do_sample': True, 'early_stopping': True, 'num_beams': 3, 'num_beam_groups': 3, 'diversity_penalty': 0.5, 'temperature': 2.0, 'top_k': 1_0, 'top_p': 0.7, 'typical_p': 0.2, 'repetition_penalty': 0.8, 'length_penalty': 0.8, 'no_repeat_ngram_size': 5, 'encoder_no_repeat_ngram_size': 5, 'bad_words_ids': [1, 2, 3], 'num_return_sequences': 3, 'chunk_size_feed_forward': 5, 'output_scores': True, 'return_dict_in_generate': True, 'forced_bos_token_id': 2, 'forced_eos_token_id': 3, 'remove_invalid_values': True, 'architectures': ['BertModel'], 'finetuning_task': 'translation', 'id2label': {0: 'label'}, 'label2id': {'label': '0'}, 'tokenizer_class': 'BertTokenizerFast', 'prefix': 'prefix', 'bos_token_id': 6, 'pad_token_id': 7, 'eos_token_id': 8, 'sep_token_id': 9, 'decoder_start_token_id': 1_0, 'exponential_decay_length_penalty': (5, 1.0_1), 'suppress_tokens': [0, 1], 'begin_suppress_tokens': 2, 'task_specific_params': {'translation': 'some_params'}, 'problem_type': 'regression', } @is_staging_test class _lowercase ( unittest.TestCase ): '''simple docstring''' @classmethod def __magic_name__( cls :Optional[Any] ) -> Any: __SCREAMING_SNAKE_CASE : str = TOKEN HfFolder.save_token(lowerCAmelCase__ ) @classmethod def __magic_name__( cls :List[str] ) -> List[str]: try: delete_repo(token=cls._token , repo_id='''test-config''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-config''' ) except HTTPError: pass def __magic_name__( self :Any ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE : int = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) config.push_to_hub('''test-config''' , use_auth_token=self._token ) __SCREAMING_SNAKE_CASE : Tuple = BertConfig.from_pretrained(f'''{USER}/test-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-config''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCAmelCase__ , repo_id='''test-config''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token ) __SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) def 
__magic_name__( self :int ) -> Optional[Any]: __SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token ) __SCREAMING_SNAKE_CASE : Any = BertConfig.from_pretrained('''valid_org/test-config-org''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-config-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowerCAmelCase__ , repo_id='''valid_org/test-config-org''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token ) __SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''valid_org/test-config-org''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) def __magic_name__( self :Dict ) -> Optional[int]: CustomConfig.register_for_auto_class() __SCREAMING_SNAKE_CASE : Tuple = CustomConfig(attribute=42 ) config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''} ) __SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''' , trust_remote_code=lowerCAmelCase__ ) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''' ) self.assertEqual(new_config.attribute , 42 ) class _lowercase ( unittest.TestCase ): '''simple docstring''' def __magic_name__( self :List[str] ) -> Dict: __SCREAMING_SNAKE_CASE : Optional[Any] = GPTaConfig() # attempt to modify each of int/float/bool/str config records and verify they were updated __SCREAMING_SNAKE_CASE : Optional[Any] = c.n_embd + 1 # int __SCREAMING_SNAKE_CASE : Optional[Any] = c.resid_pdrop + 1.0 # float __SCREAMING_SNAKE_CASE : Dict = not c.scale_attn_weights # bool __SCREAMING_SNAKE_CASE : Optional[int] = c.summary_type + '''foo''' # str c.update_from_string( f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' ) self.assertEqual(lowerCAmelCase__ , c.n_embd , '''mismatch for key: n_embd''' ) self.assertEqual(lowerCAmelCase__ , c.resid_pdrop , '''mismatch for key: resid_pdrop''' ) self.assertEqual(lowerCAmelCase__ , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''' ) self.assertEqual(lowerCAmelCase__ , c.summary_type , '''mismatch for key: summary_type''' ) def __magic_name__( self :Dict ) -> str: __SCREAMING_SNAKE_CASE : Dict = PretrainedConfig() __SCREAMING_SNAKE_CASE : str = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. 
self.assertListEqual( lowerCAmelCase__ , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] ) __SCREAMING_SNAKE_CASE : List[Any] = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCAmelCase__ , lowerCAmelCase__ )] if len(lowerCAmelCase__ ) > 0: raise ValueError( '''The following keys are set with the default values in''' ''' `test_configuration_common.config_common_kwargs` pick another value for them:''' f''' {', '.join(lowerCAmelCase__ )}.''' ) def __magic_name__( self :Union[str, Any] ) -> List[Any]: with self.assertRaises(lowerCAmelCase__ ): # config is in subfolder, the following should not work without specifying the subfolder __SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' ) __SCREAMING_SNAKE_CASE : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' ) self.assertIsNotNone(lowerCAmelCase__ ) def __magic_name__( self :List[Any] ) -> Optional[Any]: # A mock response for an HTTP head request to emulate server down __SCREAMING_SNAKE_CASE : Union[str, Any] = mock.Mock() __SCREAMING_SNAKE_CASE : List[Any] = 500 __SCREAMING_SNAKE_CASE : Union[str, Any] = {} __SCREAMING_SNAKE_CASE : Optional[Any] = HTTPError __SCREAMING_SNAKE_CASE : str = {} # Download this model to make sure it's in the cache. __SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=lowerCAmelCase__ ) as mock_head: __SCREAMING_SNAKE_CASE : Optional[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) # This check we did call the fake head request mock_head.assert_called() def __magic_name__( self :Union[str, Any] ) -> List[Any]: # This test is for deprecated behavior and can be removed in v5 __SCREAMING_SNAKE_CASE : Optional[int] = BertConfig.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' ) def __magic_name__( self :str ) -> List[str]: __SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained('''bert-base-cased''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = ['''config.4.0.0.json'''] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[Any] = 2 json.dump(configuration.to_dict() , open(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , '''w''' ) ) # This should pick the new configuration file as the version of Transformers is > 4.0.0 __SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertEqual(new_configuration.hidden_size , 2 ) # Will need to be adjusted if we reach v42 and this test is still here. 
# Should pick the old configuration file as the version of Transformers is < 4.42.0 __SCREAMING_SNAKE_CASE : List[Any] = ['''config.42.0.0.json'''] __SCREAMING_SNAKE_CASE : Tuple = 768 configuration.save_pretrained(lowerCAmelCase__ ) shutil.move(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , os.path.join(lowerCAmelCase__ , '''config.42.0.0.json''' ) ) __SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertEqual(new_configuration.hidden_size , 768 ) def __magic_name__( self :List[str] ) -> Union[str, Any]: # This repo has two configuration files, one for v4.0.0 and above with a different hidden size. __SCREAMING_SNAKE_CASE : Union[str, Any] = '''hf-internal-testing/test-two-configs''' import transformers as new_transformers __SCREAMING_SNAKE_CASE : int = '''v4.0.0''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = new_transformers.models.auto.AutoConfig.from_pretrained( lowerCAmelCase__ , return_unused_kwargs=lowerCAmelCase__ ) self.assertEqual(new_configuration.hidden_size , 2 ) # This checks `_configuration_file` ia not kept in the kwargs by mistake. self.assertDictEqual(lowerCAmelCase__ , {} ) # Testing an older version by monkey-patching the version in the module it's used. import transformers as old_transformers __SCREAMING_SNAKE_CASE : List[str] = '''v3.0.0''' __SCREAMING_SNAKE_CASE : Any = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertEqual(old_configuration.hidden_size , 768 )
style_context_codestyle: 696
label: 0
"""simple docstring""" def lowerCamelCase_( _lowerCamelCase = 100 ) -> int: '''simple docstring''' _lowerCamelCase : List[str] = set() _lowerCamelCase : Optional[Any] = 0 _lowerCamelCase : Optional[int] = n + 1 # maximum limit for a in range(2 , _lowerCamelCase ): for b in range(2 , _lowerCamelCase ): _lowerCamelCase : List[str] = a**b # calculates the current power collect_powers.add(_lowerCamelCase ) # adds the result to the set return len(_lowerCamelCase ) if __name__ == "__main__": print('''Number of terms ''', solution(int(str(input()).strip())))
code_codestyle: 46
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __lowerCAmelCase : Any ={ 'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : int =['LlamaTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Union[str, Any] =['LlamaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : str =[ 'LlamaForCausalLM', 'LlamaModel', 'LlamaPreTrainedModel', 'LlamaForSequenceClassification', ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys __lowerCAmelCase : Optional[int] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
style_context_codestyle: 696
label: 0
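The `code` cell of the record above is Project Euler 29, whose statement gives 15 distinct terms of a**b for 2 ≤ a, b ≤ 5; that makes a cheap check for the repaired `solution` (a one-line set-comprehension variant is shown here):

```python
def solution(n: int = 100) -> int:
    return len({a**b for a in range(2, n + 1) for b in range(2, n + 1)})


assert solution(5) == 15  # worked example from the problem statement
print(solution())         # the full n = 100 case
```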
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    # Euclidean distance between two equal-length vectors.
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    # For every query vector, return the nearest dataset vector and its distance.
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    # Cosine similarity of two vectors.
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
code_codestyle: 47
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Dict =logging.get_logger(__name__) __lowerCAmelCase : List[Any] ={ 'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json', } class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = '''switch_transformers''' SCREAMING_SNAKE_CASE__ : Optional[int] = ['''past_key_values'''] SCREAMING_SNAKE_CASE__ : str = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self :Optional[int] , lowerCAmelCase__ :Union[str, Any]=32_128 , lowerCAmelCase__ :int=768 , lowerCAmelCase__ :Optional[Any]=64 , lowerCAmelCase__ :List[str]=2_048 , lowerCAmelCase__ :Optional[int]=64 , lowerCAmelCase__ :Union[str, Any]=12 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :Optional[int]=3 , lowerCAmelCase__ :Optional[int]=12 , lowerCAmelCase__ :Optional[Any]=8 , lowerCAmelCase__ :Tuple=False , lowerCAmelCase__ :List[Any]=0.01 , lowerCAmelCase__ :Any="float32" , lowerCAmelCase__ :int=False , lowerCAmelCase__ :int=32 , lowerCAmelCase__ :Optional[Any]=128 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :str=1E-6 , lowerCAmelCase__ :Tuple=0.001 , lowerCAmelCase__ :List[Any]=0.001 , lowerCAmelCase__ :Union[str, Any]=1.0 , lowerCAmelCase__ :Tuple="relu" , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :Optional[int]=False , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]=0 , lowerCAmelCase__ :Union[str, Any]=1 , **lowerCAmelCase__ :List[str] , ) -> Tuple: __SCREAMING_SNAKE_CASE : Any = vocab_size __SCREAMING_SNAKE_CASE : Union[str, Any] = d_model __SCREAMING_SNAKE_CASE : Optional[int] = d_kv __SCREAMING_SNAKE_CASE : Tuple = d_ff __SCREAMING_SNAKE_CASE : Tuple = num_sparse_encoder_layers __SCREAMING_SNAKE_CASE : List[Any] = num_layers __SCREAMING_SNAKE_CASE : Union[str, Any] = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry __SCREAMING_SNAKE_CASE : Optional[Any] = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: __SCREAMING_SNAKE_CASE : List[Any] = self.num_layers // self.num_sparse_encoder_layers else: __SCREAMING_SNAKE_CASE : Tuple = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. 
if self.num_sparse_decoder_layers > 0: __SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_decoder_layers // self.num_sparse_decoder_layers else: __SCREAMING_SNAKE_CASE : Dict = self.num_decoder_layers # HACK: this will create 0 sparse layers __SCREAMING_SNAKE_CASE : List[Any] = num_heads __SCREAMING_SNAKE_CASE : List[Any] = num_experts __SCREAMING_SNAKE_CASE : Tuple = expert_capacity __SCREAMING_SNAKE_CASE : List[Any] = router_bias __SCREAMING_SNAKE_CASE : Optional[Any] = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' ) __SCREAMING_SNAKE_CASE : List[Any] = router_dtype __SCREAMING_SNAKE_CASE : Optional[Any] = router_ignore_padding_tokens __SCREAMING_SNAKE_CASE : int = relative_attention_num_buckets __SCREAMING_SNAKE_CASE : Any = relative_attention_max_distance __SCREAMING_SNAKE_CASE : Union[str, Any] = dropout_rate __SCREAMING_SNAKE_CASE : Dict = layer_norm_epsilon __SCREAMING_SNAKE_CASE : int = initializer_factor __SCREAMING_SNAKE_CASE : List[str] = feed_forward_proj __SCREAMING_SNAKE_CASE : Any = use_cache __SCREAMING_SNAKE_CASE : Union[str, Any] = add_router_probs __SCREAMING_SNAKE_CASE : int = router_z_loss_coef __SCREAMING_SNAKE_CASE : List[str] = router_aux_loss_coef __SCREAMING_SNAKE_CASE : Dict = self.feed_forward_proj.split('''-''' ) __SCREAMING_SNAKE_CASE : Optional[int] = act_info[-1] __SCREAMING_SNAKE_CASE : Optional[Any] = act_info[0] == '''gated''' if len(lowerCAmelCase__ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase__ ) > 2: raise ValueError( f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.''' '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ''' '''\'gated-gelu\' or \'relu\'''' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": __SCREAMING_SNAKE_CASE : List[Any] = '''gelu_new''' super().__init__( pad_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ , )
style_context_codestyle: 696
label: 0
'''simple docstring''' import gc import unittest import torch from parameterized import parameterized from diffusers import AutoencoderKL from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class A ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): snake_case__ :Optional[int] = AutoencoderKL snake_case__ :int = 'sample' snake_case__ :str = 1e-2 @property def __SCREAMING_SNAKE_CASE ( self : List[Any] ): """simple docstring""" lowerCAmelCase__ = 4 lowerCAmelCase__ = 3 lowerCAmelCase__ = (32, 32) lowerCAmelCase__ = floats_tensor((batch_size, num_channels) + sizes ).to(__magic_name__ ) return {"sample": image} @property def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" return (3, 32, 32) @property def __SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" return (3, 32, 32) def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" lowerCAmelCase__ = { "block_out_channels": [32, 64], "in_channels": 3, "out_channels": 3, "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"], "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"], "latent_channels": 4, } lowerCAmelCase__ = self.dummy_input return init_dict, inputs_dict def __SCREAMING_SNAKE_CASE ( self : int ): """simple docstring""" pass def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] ): """simple docstring""" pass @unittest.skipIf(torch_device == "mps" , "Gradient checkpointing skipped on MPS" ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" lowerCAmelCase__ ,lowerCAmelCase__ = self.prepare_init_args_and_inputs_for_common() lowerCAmelCase__ = self.model_class(**__magic_name__ ) model.to(__magic_name__ ) assert not model.is_gradient_checkpointing and model.training lowerCAmelCase__ = model(**__magic_name__ ).sample # run the backwards pass on the model. For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model.zero_grad() lowerCAmelCase__ = torch.randn_like(__magic_name__ ) lowerCAmelCase__ = (out - labels).mean() loss.backward() # re-instantiate the model now enabling gradient checkpointing lowerCAmelCase__ = self.model_class(**__magic_name__ ) # clone model model_a.load_state_dict(model.state_dict() ) model_a.to(__magic_name__ ) model_a.enable_gradient_checkpointing() assert model_a.is_gradient_checkpointing and model_a.training lowerCAmelCase__ = model_a(**__magic_name__ ).sample # run the backwards pass on the model. 
For backwards pass, for simplicity purpose, # we won't calculate the loss and rather backprop on out.sum() model_a.zero_grad() lowerCAmelCase__ = (out_a - labels).mean() loss_a.backward() # compare the output and parameters gradients self.assertTrue((loss - loss_a).abs() < 1E-5 ) lowerCAmelCase__ = dict(model.named_parameters() ) lowerCAmelCase__ = dict(model_a.named_parameters() ) for name, param in named_params.items(): self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5E-5 ) ) def __SCREAMING_SNAKE_CASE ( self : Optional[Any] ): """simple docstring""" lowerCAmelCase__ ,lowerCAmelCase__ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" , output_loading_info=__magic_name__ ) self.assertIsNotNone(__magic_name__ ) self.assertEqual(len(loading_info["missing_keys"] ) , 0 ) model.to(__magic_name__ ) lowerCAmelCase__ = model(**self.dummy_input ) assert image is not None, "Make sure output is not None" def __SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" lowerCAmelCase__ = AutoencoderKL.from_pretrained("fusing/autoencoder-kl-dummy" ) lowerCAmelCase__ = model.to(__magic_name__ ) model.eval() if torch_device == "mps": lowerCAmelCase__ = torch.manual_seed(0 ) else: lowerCAmelCase__ = torch.Generator(device=__magic_name__ ).manual_seed(0 ) lowerCAmelCase__ = torch.randn( 1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , ) lowerCAmelCase__ = image.to(__magic_name__ ) with torch.no_grad(): lowerCAmelCase__ = model(__magic_name__ , sample_posterior=__magic_name__ , generator=__magic_name__ ).sample lowerCAmelCase__ = output[0, -1, -3:, -3:].flatten().cpu() # Since the VAE Gaussian prior's generator is seeded on the appropriate device, # the expected output slices are not the same for CPU and GPU. 
if torch_device == "mps": lowerCAmelCase__ = torch.tensor( [ -4.0078E-01, -3.8323E-04, -1.2681E-01, -1.1462E-01, 2.0095E-01, 1.0893E-01, -8.8247E-02, -3.0361E-01, -9.8644E-03, ] ) elif torch_device == "cpu": lowerCAmelCase__ = torch.tensor( [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026] ) else: lowerCAmelCase__ = torch.tensor( [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485] ) self.assertTrue(torch_all_close(__magic_name__ , __magic_name__ , rtol=1E-2 ) ) @slow class A ( unittest.TestCase ): def __SCREAMING_SNAKE_CASE ( self : Optional[Any] , __magic_name__ : List[Any] , __magic_name__ : Any ): """simple docstring""" return f"""gaussian_noise_s={seed}_shape={"_".join([str(__magic_name__ ) for s in shape] )}.npy""" def __SCREAMING_SNAKE_CASE ( self : Tuple ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : List[str]=0 , __magic_name__ : str=(4, 3, 512, 512) , __magic_name__ : str=False ): """simple docstring""" lowerCAmelCase__ = torch.floataa if fpaa else torch.floataa lowerCAmelCase__ = torch.from_numpy(load_hf_numpy(self.get_file_format(__magic_name__ , __magic_name__ ) ) ).to(__magic_name__ ).to(__magic_name__ ) return image def __SCREAMING_SNAKE_CASE ( self : int , __magic_name__ : List[str]="CompVis/stable-diffusion-v1-4" , __magic_name__ : Optional[Any]=False ): """simple docstring""" lowerCAmelCase__ = "fp16" if fpaa else None lowerCAmelCase__ = torch.floataa if fpaa else torch.floataa lowerCAmelCase__ = AutoencoderKL.from_pretrained( __magic_name__ , subfolder="vae" , torch_dtype=__magic_name__ , revision=__magic_name__ , ) model.to(__magic_name__ ).eval() return model def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Union[str, Any]=0 ): """simple docstring""" if torch_device == "mps": return torch.manual_seed(__magic_name__ ) return torch.Generator(device=__magic_name__ ).manual_seed(__magic_name__ ) @parameterized.expand( [ # fmt: off [33, [-0.1603, 0.9878, -0.0495, -0.0790, -0.2709, 0.8375, -0.2060, -0.0824], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2376, 0.1168, 0.1332, -0.4840, -0.2508, -0.0791, -0.0493, -0.4089], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def __SCREAMING_SNAKE_CASE ( self : Optional[int] , __magic_name__ : Tuple , __magic_name__ : Optional[int] , __magic_name__ : Dict ): """simple docstring""" lowerCAmelCase__ = self.get_sd_vae_model() lowerCAmelCase__ = self.get_sd_image(__magic_name__ ) lowerCAmelCase__ = self.get_generator(__magic_name__ ) with torch.no_grad(): lowerCAmelCase__ = model(__magic_name__ , generator=__magic_name__ , sample_posterior=__magic_name__ ).sample assert sample.shape == image.shape lowerCAmelCase__ = sample[-1, -2:, -2:, :2].flatten().float().cpu() lowerCAmelCase__ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(__magic_name__ , __magic_name__ , atol=3E-3 ) @parameterized.expand( [ # fmt: off [33, [-0.0513, 0.0289, 1.3799, 0.2166, -0.2573, -0.0871, 0.5103, -0.0999]], [47, [-0.4128, -0.1320, -0.3704, 0.1965, -0.4116, -0.2332, -0.3340, 0.2247]], # fmt: on ] ) @require_torch_gpu def __SCREAMING_SNAKE_CASE ( self : List[str] , __magic_name__ : Optional[int] , __magic_name__ : Any ): """simple docstring""" lowerCAmelCase__ = self.get_sd_vae_model(fpaa=__magic_name__ ) lowerCAmelCase__ = self.get_sd_image(__magic_name__ , 
fpaa=__magic_name__ ) lowerCAmelCase__ = self.get_generator(__magic_name__ ) with torch.no_grad(): lowerCAmelCase__ = model(__magic_name__ , generator=__magic_name__ , sample_posterior=__magic_name__ ).sample assert sample.shape == image.shape lowerCAmelCase__ = sample[-1, -2:, :2, -2:].flatten().float().cpu() lowerCAmelCase__ = torch.tensor(__magic_name__ ) assert torch_all_close(__magic_name__ , __magic_name__ , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.1609, 0.9866, -0.0487, -0.0777, -0.2716, 0.8368, -0.2055, -0.0814], [-0.2395, 0.0098, 0.0102, -0.0709, -0.2840, -0.0274, -0.0718, -0.1824]], [47, [-0.2377, 0.1147, 0.1333, -0.4841, -0.2506, -0.0805, -0.0491, -0.4085], [0.0350, 0.0847, 0.0467, 0.0344, -0.0842, -0.0547, -0.0633, -0.1131]], # fmt: on ] ) def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : List[Any] , __magic_name__ : Union[str, Any] , __magic_name__ : Dict ): """simple docstring""" lowerCAmelCase__ = self.get_sd_vae_model() lowerCAmelCase__ = self.get_sd_image(__magic_name__ ) with torch.no_grad(): lowerCAmelCase__ = model(__magic_name__ ).sample assert sample.shape == image.shape lowerCAmelCase__ = sample[-1, -2:, -2:, :2].flatten().float().cpu() lowerCAmelCase__ = torch.tensor(expected_slice_mps if torch_device == "mps" else expected_slice ) assert torch_all_close(__magic_name__ , __magic_name__ , atol=3E-3 ) @parameterized.expand( [ # fmt: off [13, [-0.2051, -0.1803, -0.2311, -0.2114, -0.3292, -0.3574, -0.2953, -0.3323]], [37, [-0.2632, -0.2625, -0.2199, -0.2741, -0.4539, -0.4990, -0.3720, -0.4925]], # fmt: on ] ) @require_torch_gpu def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : List[str] , __magic_name__ : List[str] ): """simple docstring""" lowerCAmelCase__ = self.get_sd_vae_model() lowerCAmelCase__ = self.get_sd_image(__magic_name__ , shape=(3, 4, 64, 64) ) with torch.no_grad(): lowerCAmelCase__ = model.decode(__magic_name__ ).sample assert list(sample.shape ) == [3, 3, 512, 512] lowerCAmelCase__ = sample[-1, -2:, :2, -2:].flatten().cpu() lowerCAmelCase__ = torch.tensor(__magic_name__ ) assert torch_all_close(__magic_name__ , __magic_name__ , atol=1E-3 ) @parameterized.expand( [ # fmt: off [27, [-0.0369, 0.0207, -0.0776, -0.0682, -0.1747, -0.1930, -0.1465, -0.2039]], [16, [-0.1628, -0.2134, -0.2747, -0.2642, -0.3774, -0.4404, -0.3687, -0.4277]], # fmt: on ] ) @require_torch_gpu def __SCREAMING_SNAKE_CASE ( self : str , __magic_name__ : Optional[int] , __magic_name__ : Tuple ): """simple docstring""" lowerCAmelCase__ = self.get_sd_vae_model(fpaa=__magic_name__ ) lowerCAmelCase__ = self.get_sd_image(__magic_name__ , shape=(3, 4, 64, 64) , fpaa=__magic_name__ ) with torch.no_grad(): lowerCAmelCase__ = model.decode(__magic_name__ ).sample assert list(sample.shape ) == [3, 3, 512, 512] lowerCAmelCase__ = sample[-1, -2:, :2, -2:].flatten().float().cpu() lowerCAmelCase__ = torch.tensor(__magic_name__ ) assert torch_all_close(__magic_name__ , __magic_name__ , atol=5E-3 ) @parameterized.expand([(13,), (16,), (27,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." 
) def __SCREAMING_SNAKE_CASE ( self : Any , __magic_name__ : Union[str, Any] ): """simple docstring""" lowerCAmelCase__ = self.get_sd_vae_model(fpaa=__magic_name__ ) lowerCAmelCase__ = self.get_sd_image(__magic_name__ , shape=(3, 4, 64, 64) , fpaa=__magic_name__ ) with torch.no_grad(): lowerCAmelCase__ = model.decode(__magic_name__ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): lowerCAmelCase__ = model.decode(__magic_name__ ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(__magic_name__ , __magic_name__ , atol=1E-1 ) @parameterized.expand([(13,), (16,), (37,)] ) @require_torch_gpu @unittest.skipIf(not is_xformers_available() , reason="xformers is not required when using PyTorch 2.0." ) def __SCREAMING_SNAKE_CASE ( self : Tuple , __magic_name__ : List[Any] ): """simple docstring""" lowerCAmelCase__ = self.get_sd_vae_model() lowerCAmelCase__ = self.get_sd_image(__magic_name__ , shape=(3, 4, 64, 64) ) with torch.no_grad(): lowerCAmelCase__ = model.decode(__magic_name__ ).sample model.enable_xformers_memory_efficient_attention() with torch.no_grad(): lowerCAmelCase__ = model.decode(__magic_name__ ).sample assert list(sample.shape ) == [3, 3, 512, 512] assert torch_all_close(__magic_name__ , __magic_name__ , atol=1E-2 ) @parameterized.expand( [ # fmt: off [33, [-0.3001, 0.0918, -2.6984, -3.9720, -3.2099, -5.0353, 1.7338, -0.2065, 3.4267]], [47, [-1.5030, -4.3871, -6.0355, -9.1157, -1.6661, -2.7853, 2.1607, -5.0823, 2.5633]], # fmt: on ] ) def __SCREAMING_SNAKE_CASE ( self : Dict , __magic_name__ : Dict , __magic_name__ : Tuple ): """simple docstring""" lowerCAmelCase__ = self.get_sd_vae_model() lowerCAmelCase__ = self.get_sd_image(__magic_name__ ) lowerCAmelCase__ = self.get_generator(__magic_name__ ) with torch.no_grad(): lowerCAmelCase__ = model.encode(__magic_name__ ).latent_dist lowerCAmelCase__ = dist.sample(generator=__magic_name__ ) assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]] lowerCAmelCase__ = sample[0, -1, -3:, -3:].flatten().cpu() lowerCAmelCase__ = torch.tensor(__magic_name__ ) lowerCAmelCase__ = 3E-3 if torch_device != "mps" else 1E-2 assert torch_all_close(__magic_name__ , __magic_name__ , atol=__magic_name__ )
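The tests above exercise diffusers' AutoencoderKL end to end. A short encode/decode round trip showing the same API surface; the checkpoint name and shapes are illustrative only:

import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
vae.eval()

image = torch.randn(1, 3, 256, 256)  # dummy batch standing in for a real image
with torch.no_grad():
    posterior = vae.encode(image).latent_dist          # DiagonalGaussianDistribution
    latents = posterior.sample(generator=torch.manual_seed(0))
    reconstruction = vae.decode(latents).sample        # back to image space
print(latents.shape, reconstruction.shape)             # (1, 4, 32, 32) (1, 3, 256, 256)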
48
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class _lowercase ( A__ ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE__ : Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
696
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _lowercase : str = logging.get_logger(__name__) _lowercase : Optional[int] = { 'microsoft/resnet-50': 'https://huggingface.co/microsoft/resnet-50/blob/main/config.json', } class _UpperCAmelCase ( _lowerCAmelCase , _lowerCAmelCase ): a__ : Optional[int] = "resnet" a__ : str = ["basic", "bottleneck"] def __init__( self : str , _lowercase : Dict=3 , _lowercase : Optional[int]=64 , _lowercase : Dict=[2_56, 5_12, 10_24, 20_48] , _lowercase : Optional[int]=[3, 4, 6, 3] , _lowercase : List[Any]="bottleneck" , _lowercase : List[str]="relu" , _lowercase : int=False , _lowercase : Dict=None , _lowercase : str=None , **_lowercase : Any , ): super().__init__(**_lowercase ) if layer_type not in self.layer_types: raise ValueError(F'''layer_type={layer_type} is not one of {",".join(self.layer_types )}''' ) __UpperCAmelCase = num_channels __UpperCAmelCase = embedding_size __UpperCAmelCase = hidden_sizes __UpperCAmelCase = depths __UpperCAmelCase = layer_type __UpperCAmelCase = hidden_act __UpperCAmelCase = downsample_in_first_stage __UpperCAmelCase = ['''stem'''] + [F'''stage{idx}''' for idx in range(1 , len(_lowercase ) + 1 )] __UpperCAmelCase , __UpperCAmelCase = get_aligned_output_features_output_indices( out_features=_lowercase , out_indices=_lowercase , stage_names=self.stage_names ) class _UpperCAmelCase ( _lowerCAmelCase ): a__ : Tuple = version.parse("1.11" ) @property def a ( self : Optional[Any] ): return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def a ( self : int ): return 1E-3
49
from datetime import datetime

import requests


def _UpperCamelCase ( lowercase__ ):
    __SCREAMING_SNAKE_CASE : Optional[int] = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url='''
    __SCREAMING_SNAKE_CASE : Tuple = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src''']
    return requests.get(lowercase__ ).content


if __name__ == "__main__":
    __lowerCAmelCase : int = input('Enter Video/IGTV url: ').strip()
    __lowerCAmelCase : Union[str, Any] = f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
    with open(file_name, 'wb') as fp:
        fp.write(download_video(url))
    print(f"""Done. Video saved to disk as {file_name}.""")
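The same download flow as the script above, rewritten as a sketch with explicit HTTP error handling; download_video_checked is a hypothetical name, and the downloadgram response shape is taken on trust from the original code:

import requests

def download_video_checked(url: str, timeout: int = 30) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    meta = requests.get(base_url + url, timeout=timeout)
    meta.raise_for_status()                        # fail loudly on HTTP errors
    video_url = meta.json()[0]["urls"][0]["src"]   # response shape per the script above
    video = requests.get(video_url, timeout=timeout)
    video.raise_for_status()
    return video.content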
696
0
'''simple docstring'''
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


UpperCamelCase : str = logging.get_logger(__name__)

UpperCamelCase : Union[str, Any] = {'vocab_file': 'vocab.txt'}

UpperCamelCase : int = {
    'vocab_file': {
        'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
        'YituTech/conv-bert-medium-small': (
            'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
        ),
        'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
    }
}

UpperCamelCase : Tuple = {
    'YituTech/conv-bert-base': 5_12,
    'YituTech/conv-bert-medium-small': 5_12,
    'YituTech/conv-bert-small': 5_12,
}

UpperCamelCase : Dict = {
    'YituTech/conv-bert-base': {'do_lower_case': True},
    'YituTech/conv-bert-medium-small': {'do_lower_case': True},
    'YituTech/conv-bert-small': {'do_lower_case': True},
}


class UpperCamelCase__ (a ):
    '''simple docstring'''

    _UpperCamelCase = VOCAB_FILES_NAMES
    _UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
    _UpperCamelCase = PRETRAINED_INIT_CONFIGURATION
    _UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    _UpperCamelCase = ConvBertTokenizer

    def __init__( self ,_lowerCAmelCase=None ,_lowerCAmelCase=None ,_lowerCAmelCase=True ,_lowerCAmelCase="[UNK]" ,_lowerCAmelCase="[SEP]" ,_lowerCAmelCase="[PAD]" ,_lowerCAmelCase="[CLS]" ,_lowerCAmelCase="[MASK]" ,_lowerCAmelCase=True ,_lowerCAmelCase=None ,**_lowerCAmelCase ,):
        super().__init__(
            _lowerCAmelCase ,tokenizer_file=_lowerCAmelCase ,do_lower_case=_lowerCAmelCase ,unk_token=_lowerCAmelCase ,sep_token=_lowerCAmelCase ,pad_token=_lowerCAmelCase ,cls_token=_lowerCAmelCase ,mask_token=_lowerCAmelCase ,tokenize_chinese_chars=_lowerCAmelCase ,strip_accents=_lowerCAmelCase ,**_lowerCAmelCase ,)
        lowerCamelCase__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" ,_lowerCAmelCase ) != do_lower_case
            or normalizer_state.get("""strip_accents""" ,_lowerCAmelCase ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" ,_lowerCAmelCase ) != tokenize_chinese_chars
        ):
            lowerCamelCase__ = getattr(_lowerCAmelCase ,normalizer_state.pop("""type""" ) )
            lowerCamelCase__ = do_lower_case
            lowerCamelCase__ = strip_accents
            lowerCamelCase__ = tokenize_chinese_chars
            lowerCamelCase__ = normalizer_class(**_lowerCAmelCase )
        lowerCamelCase__ = do_lower_case

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase=None ):
        lowerCamelCase__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
        lowerCamelCase__ = [self.sep_token_id]
        lowerCamelCase__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def UpperCamelCase_ ( self ,_lowerCAmelCase ,_lowerCAmelCase = None ):
        lowerCamelCase__ = self._tokenizer.model.save(_lowerCAmelCase ,name=_lowerCAmelCase )
        return tuple(_lowerCAmelCase )
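Assuming this file corresponds to the public ConvBertTokenizerFast, a minimal usage sketch via AutoTokenizer:

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
enc = tokenizer("ConvBERT uses span-based dynamic convolution.")
print(enc["input_ids"][:5])
print(enc["token_type_ids"][:5])  # all zeros for a single segment, per the code above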
50
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _lowercase ( A__ , A__ , A__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = StableDiffusionInstructPixaPixPipeline SCREAMING_SNAKE_CASE__ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''} SCREAMING_SNAKE_CASE__ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS SCREAMING_SNAKE_CASE__ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS SCREAMING_SNAKE_CASE__ : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS def __magic_name__( self :int ) -> Optional[int]: torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) __SCREAMING_SNAKE_CASE : str = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : Any = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : Dict = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) __SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __magic_name__( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any]=0 ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0] __SCREAMING_SNAKE_CASE : List[Any] = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert('''RGB''' ) if str(lowerCAmelCase__ ).startswith('''mps''' ): __SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(lowerCAmelCase__ ) else: __SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, 
'''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''image_guidance_scale''': 1, '''output_type''': '''numpy''', } return inputs def __magic_name__( self :Union[str, Any] ) -> str: __SCREAMING_SNAKE_CASE : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE : Any = self.get_dummy_components() __SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = sd_pipe(**lowerCAmelCase__ ).images __SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE : int = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __magic_name__( self :Tuple ) -> Optional[int]: __SCREAMING_SNAKE_CASE : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components() __SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = '''french fries''' __SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(**lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = output.images __SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __magic_name__( self :Dict ) -> Dict: __SCREAMING_SNAKE_CASE : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_components() __SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(lowerCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Dict = [inputs['''prompt''']] * 2 __SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(inputs['''image'''] ).astype(np.floataa ) / 255.0 __SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowerCAmelCase__ ).unsqueeze(0 ).to(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Union[str, Any] = image / 2 + 0.5 __SCREAMING_SNAKE_CASE : Optional[Any] = image.permute(0 , 3 , 1 , 2 ) __SCREAMING_SNAKE_CASE : Any = image.repeat(2 , 1 , 1 , 1 ) __SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(**lowerCAmelCase__ ).images __SCREAMING_SNAKE_CASE : Dict = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) __SCREAMING_SNAKE_CASE : Tuple = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __magic_name__( self :Union[str, Any] ) -> Dict: __SCREAMING_SNAKE_CASE : Tuple = '''cpu''' # ensure determinism for the device-dependent 
torch.Generator __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components() __SCREAMING_SNAKE_CASE : Union[str, Any] = EulerAncestralDiscreteScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' ) __SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(lowerCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Dict = sd_pipe(**lowerCAmelCase__ ).images __SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE : List[str] = [round(lowerCAmelCase__ , 4 ) for x in image_slice.flatten().tolist()] print(''','''.join([str(lowerCAmelCase__ ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE : List[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __magic_name__( self :Tuple ) -> Optional[int]: super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def __magic_name__( self :str ) -> List[Any]: __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components() __SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : int = VaeImageProcessor(do_resize=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : str = pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Tuple = pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' ) )[0] __SCREAMING_SNAKE_CASE : Union[str, Any] = components['''vae'''] __SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' ) for image_param in self.image_latents_params: if image_param in inputs.keys(): __SCREAMING_SNAKE_CASE : Optional[int] = vae.encode(inputs[image_param] ).latent_dist.mode() __SCREAMING_SNAKE_CASE : Dict = pipe(**lowerCAmelCase__ )[0] __SCREAMING_SNAKE_CASE : List[Any] = np.abs(out - out_latents_inputs ).max() self.assertLess(lowerCAmelCase__ , 1E-4 , '''passing latents as image input generate different result from passing image''' ) @slow @require_torch_gpu class _lowercase ( unittest.TestCase ): '''simple docstring''' def __magic_name__( self :Union[str, Any] ) -> str: super().tearDown() gc.collect() torch.cuda.empty_cache() def __magic_name__( self :int , lowerCAmelCase__ :Dict=0 ) -> Optional[int]: __SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : int = load_image( '''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' ) __SCREAMING_SNAKE_CASE : Dict = { '''prompt''': '''turn him into a cyborg''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''image_guidance_scale''': 1.0, '''output_type''': '''numpy''', } return inputs def __magic_name__( self :Dict ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing() __SCREAMING_SNAKE_CASE : Dict = self.get_inputs() __SCREAMING_SNAKE_CASE : str = pipe(**lowerCAmelCase__ 
).images __SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def __magic_name__( self :Any ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing() __SCREAMING_SNAKE_CASE : Any = self.get_inputs() __SCREAMING_SNAKE_CASE : int = pipe(**lowerCAmelCase__ ).images __SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE : Dict = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def __magic_name__( self :Optional[int] ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Any = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing() __SCREAMING_SNAKE_CASE : str = self.get_inputs() __SCREAMING_SNAKE_CASE : Optional[int] = pipe(**lowerCAmelCase__ ).images __SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE : List[Any] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def __magic_name__( self :Dict ) -> Tuple: __SCREAMING_SNAKE_CASE : List[Any] = 0 def callback_fn(lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :torch.FloatTensor ) -> None: __SCREAMING_SNAKE_CASE : Dict = True nonlocal number_of_steps number_of_steps += 1 if step == 1: __SCREAMING_SNAKE_CASE : Any = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) __SCREAMING_SNAKE_CASE : Tuple = latents[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE : str = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 elif step == 2: __SCREAMING_SNAKE_CASE : Union[str, Any] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) __SCREAMING_SNAKE_CASE : List[str] = latents[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE : str = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 __SCREAMING_SNAKE_CASE : List[str] = False __SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa ) __SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing() __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs() pipe(**lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def 
__magic_name__( self :List[str] ) -> Union[str, Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __SCREAMING_SNAKE_CASE : int = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa ) __SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() __SCREAMING_SNAKE_CASE : Dict = self.get_inputs() __SCREAMING_SNAKE_CASE : List[Any] = pipe(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Tuple = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def __magic_name__( self :int ) -> Tuple: __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 __SCREAMING_SNAKE_CASE : int = inputs['''image'''].resize((504, 504) ) __SCREAMING_SNAKE_CASE : Optional[int] = '''timbrooks/instruct-pix2pix''' __SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained( lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing() __SCREAMING_SNAKE_CASE : Any = pipe(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = output.images[0] __SCREAMING_SNAKE_CASE : str = image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) __SCREAMING_SNAKE_CASE : str = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
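A minimal end-to-end sketch of the pipeline these tests cover. In the public diffusers API the class is StableDiffusionInstructPix2PixPipeline (rendered as "PixaPix" by the obfuscation in this file); the checkpoint and image URL are the ones the tests themselves use:

import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None
)
pipe = pipe.to("cuda")

image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
)
edited = pipe(
    "turn him into a cyborg",
    image=image,
    num_inference_steps=20,
    image_guidance_scale=1.0,  # how strongly the result should stay close to the input image
).images[0]
edited.save("cyborg.png")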
696
0
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging a__ : Union[str, Any] = logging.get_logger(__name__) a__ : Optional[int] = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class lowerCAmelCase__ ( UpperCAmelCase_ ): '''simple docstring''' _lowerCamelCase ="megatron-bert" def __init__( self : Dict , a__ : Union[str, Any]=29056 , a__ : Dict=1024 , a__ : str=24 , a__ : Any=16 , a__ : Tuple=4096 , a__ : Optional[int]="gelu" , a__ : Tuple=0.1 , a__ : Tuple=0.1 , a__ : Any=512 , a__ : Optional[Any]=2 , a__ : str=0.02 , a__ : Optional[int]=1e-1_2 , a__ : Union[str, Any]=0 , a__ : Optional[Any]="absolute" , a__ : Dict=True , **a__ : Dict , ): super().__init__(pad_token_id=a__ , **a__ ) UpperCAmelCase = vocab_size UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = hidden_act UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = max_position_embeddings UpperCAmelCase = type_vocab_size UpperCAmelCase = initializer_range UpperCAmelCase = layer_norm_eps UpperCAmelCase = position_embedding_type UpperCAmelCase = use_cache
51
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion

import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class _lowercase ( A__ ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE__ : torch.FloatTensor
    SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None


def _UpperCamelCase ( lowercase__ , lowercase__=0.999 , lowercase__="cosine" , ):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(lowercase__ ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(lowercase__ ):
            return math.exp(t * -12.0 )

    else:
        raise ValueError(f'''Unsupported alpha_transform_type: {alpha_transform_type}''' )

    __SCREAMING_SNAKE_CASE : List[Any] = []
    for i in range(lowercase__ ):
        __SCREAMING_SNAKE_CASE : Optional[Any] = i / num_diffusion_timesteps
        __SCREAMING_SNAKE_CASE : List[Any] = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(lowercase__ ) / alpha_bar_fn(lowercase__ ) , lowercase__ ) )
    return torch.tensor(lowercase__ , dtype=torch.floataa )


class _lowercase ( A__ , A__ ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE__ : Any = 1

    @register_to_config
    def __init__( self :Dict , lowerCAmelCase__ :int = 1_000 , lowerCAmelCase__ :float = 0.0001 , lowerCAmelCase__ :float = 0.02 , lowerCAmelCase__ :str = "linear" , lowerCAmelCase__ :Optional[Union[np.ndarray, List[float]]] = None , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :int = 0 , lowerCAmelCase__ :str = "epsilon" , lowerCAmelCase__ :float = 1.0 , **lowerCAmelCase__ :int , ) -> Union[str, Any]:
        if kwargs.get('''set_alpha_to_one''' , lowerCAmelCase__ ) is not None:
            __SCREAMING_SNAKE_CASE : Optional[int] = (
                '''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
            )
            deprecate('''set_alpha_to_one''' , '''1.0.0''' , lowerCAmelCase__ , standard_warn=lowerCAmelCase__ )
            __SCREAMING_SNAKE_CASE : List[str] = kwargs['''set_alpha_to_one''']
        if trained_betas is not None:
            __SCREAMING_SNAKE_CASE : Any = torch.tensor(lowerCAmelCase__ , dtype=torch.floataa )
        elif beta_schedule == "linear":
            __SCREAMING_SNAKE_CASE : str = torch.linspace(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , dtype=torch.floataa )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            __SCREAMING_SNAKE_CASE : List[Any] = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCAmelCase__ , dtype=torch.floataa ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            __SCREAMING_SNAKE_CASE : Optional[Any] = betas_for_alpha_bar(lowerCAmelCase__ )
        else:
            raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' )

        __SCREAMING_SNAKE_CASE : Optional[int] = 1.0 - self.betas
        __SCREAMING_SNAKE_CASE : int = torch.cumprod(self.alphas , dim=0 )

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        __SCREAMING_SNAKE_CASE : int = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        __SCREAMING_SNAKE_CASE : Any = 1.0

        # setable values
        __SCREAMING_SNAKE_CASE : Optional[Any] = None
        __SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(np.arange(0 , lowerCAmelCase__ ).copy().astype(np.intaa ) )

    def __magic_name__( self :List[str] , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :Optional[int] = None ) -> torch.FloatTensor:
        return sample

    def __magic_name__( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, torch.device] = None ) -> List[str]:
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
                f''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
                f''' maximal {self.config.num_train_timesteps} timesteps.''' )

        __SCREAMING_SNAKE_CASE : Union[str, Any] = num_inference_steps
        __SCREAMING_SNAKE_CASE : Union[str, Any] = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        __SCREAMING_SNAKE_CASE : Optional[int] = (np.arange(0 , lowerCAmelCase__ ) * step_ratio).round().copy().astype(np.intaa )
        __SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(lowerCAmelCase__ ).to(lowerCAmelCase__ )
        self.timesteps += self.config.steps_offset

    def __magic_name__( self :Tuple , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :int , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :float = 0.0 , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[torch.FloatTensor] = None , lowerCAmelCase__ :bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        __SCREAMING_SNAKE_CASE : Optional[Any] = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        __SCREAMING_SNAKE_CASE : Any = self.alphas_cumprod[timestep]
        __SCREAMING_SNAKE_CASE : str = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        __SCREAMING_SNAKE_CASE : int = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            __SCREAMING_SNAKE_CASE : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            __SCREAMING_SNAKE_CASE : List[Any] = model_output
        elif self.config.prediction_type == "sample":
            __SCREAMING_SNAKE_CASE : List[str] = model_output
            __SCREAMING_SNAKE_CASE : Optional[Any] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            __SCREAMING_SNAKE_CASE : Dict = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            __SCREAMING_SNAKE_CASE : Tuple = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
                ''' `v_prediction`''' )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            __SCREAMING_SNAKE_CASE : Dict = pred_original_sample.clamp(
                -self.config.clip_sample_range , self.config.clip_sample_range )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        __SCREAMING_SNAKE_CASE : Dict = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        __SCREAMING_SNAKE_CASE : Any = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=lowerCAmelCase__ , pred_original_sample=lowerCAmelCase__ )

    def __len__( self :Optional[int] ) -> List[Any]:
        return self.config.num_train_timesteps
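A sketch of how an inverse-DDIM scheduler like the one above is typically driven. Note that step() walks forward in noise (prev_timestep = t + stride), so the loop inverts a clean latent toward noise; scheduler, unet, and clean_latents are placeholders for instances you construct yourself:

import torch

scheduler.set_timesteps(50)           # e.g. a DDIMInverseScheduler-style instance
latents = clean_latents               # encoded image latents, shape (B, 4, H//8, W//8)
for t in scheduler.timesteps:
    with torch.no_grad():
        noise_pred = unet(latents, t).sample
    latents = scheduler.step(noise_pred, t, latents).prev_sample
# `latents` now approximates the noise that ordinary DDIM sampling would
# denoise back to the original image.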
696
0
"""simple docstring""" import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotSmallConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html A = '''platform''' import jax import jax.numpy as jnp from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, shift_tokens_right, ) def __A ( a_ :Optional[int] , a_ :List[str] , a_ :Any=None , a_ :Tuple=None , a_ :int=None , a_ :Optional[Any]=None , a_ :int=None , a_ :str=None , ) -> Tuple: if attention_mask is None: __a : Dict = np.where(input_ids != config.pad_token_id , 1 , 0) if decoder_attention_mask is None: __a : Any = np.where(decoder_input_ids != config.pad_token_id , 1 , 0) if head_mask is None: __a : str = np.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: __a : str = np.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: __a : str = np.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class __lowercase : '''simple docstring''' def __init__( self , _UpperCAmelCase , _UpperCAmelCase=13 , _UpperCAmelCase=7 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase=99 , _UpperCAmelCase=16 , _UpperCAmelCase=2 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=32 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=0 , _UpperCAmelCase=0.0_2 , ): __a : Union[str, Any] = parent __a : Optional[int] = batch_size __a : Union[str, Any] = seq_length __a : Tuple = is_training __a : Union[str, Any] = use_labels __a : Optional[int] = vocab_size __a : Tuple = hidden_size __a : Optional[int] = num_hidden_layers __a : Tuple = num_attention_heads __a : Any = intermediate_size __a : str = hidden_act __a : List[Any] = hidden_dropout_prob __a : str = attention_probs_dropout_prob __a : Tuple = max_position_embeddings __a : str = eos_token_id __a : Tuple = pad_token_id __a : List[str] = bos_token_id __a : Optional[int] = initializer_range def _lowerCamelCase ( self ): __a : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) __a : int = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) __a : int = shift_tokens_right(_UpperCAmelCase , 1 , 2 ) __a : Union[str, Any] = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , 
eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=_UpperCAmelCase , ) __a : Tuple = prepare_blenderbot_inputs_dict(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) return config, inputs_dict def _lowerCamelCase ( self ): __a , __a : List[Any] = self.prepare_config_and_inputs() return config, inputs_dict def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a : Optional[int] = 20 __a : List[Any] = model_class_name(_UpperCAmelCase ) __a : int = model.encode(inputs_dict['''input_ids'''] ) __a , __a : List[str] = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) __a : str = model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase ) __a : Union[str, Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' ) __a : Any = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __a : int = model.decode( decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , ) __a : Dict = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) __a : Union[str, Any] = model.decode( decoder_input_ids[:, -1:] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_UpperCAmelCase , ) __a : Optional[int] = model.decode(_UpperCAmelCase , _UpperCAmelCase ) __a : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): __a : Union[str, Any] = 20 __a : str = model_class_name(_UpperCAmelCase ) __a : Dict = model.encode(inputs_dict['''input_ids'''] ) __a , __a : Optional[int] = ( inputs_dict['''decoder_input_ids'''], inputs_dict['''decoder_attention_mask'''], ) __a : Any = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __a : Optional[Any] = model.init_cache(decoder_input_ids.shape[0] , _UpperCAmelCase , _UpperCAmelCase ) __a : Tuple = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __a : Tuple = model.decode( decoder_input_ids[:, :-1] , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , past_key_values=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , ) __a : Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' ) __a : Tuple = model.decode( decoder_input_ids[:, -1:] , _UpperCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_UpperCAmelCase , decoder_position_ids=_UpperCAmelCase , ) __a : Optional[Any] = model.decode(_UpperCAmelCase , _UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase ) __a : int = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" ) @require_flax class __lowercase ( unittest.TestCase ): '''simple docstring''' __lowerCAmelCase = 99 def _lowerCamelCase ( self ): __a : Optional[int] = np.array( [ [71, 82, 18, 33, 46, 91, 
2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) __a : List[Any] = input_ids.shape[0] __a : int = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def _lowerCamelCase ( self ): __a , __a , __a : List[str] = self._get_config_and_data() __a : List[str] = FlaxBlenderbotSmallForConditionalGeneration(_UpperCAmelCase ) __a : Tuple = lm_model(input_ids=_UpperCAmelCase ) __a : Any = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs['''logits'''].shape , _UpperCAmelCase ) def _lowerCamelCase ( self ): __a : Dict = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) __a : List[Any] = FlaxBlenderbotSmallForConditionalGeneration(_UpperCAmelCase ) __a : Optional[int] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) __a : Dict = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) __a : Dict = lm_model(input_ids=_UpperCAmelCase , decoder_input_ids=_UpperCAmelCase ) __a : Dict = (*summary.shape, config.vocab_size) self.assertEqual(outputs['''logits'''].shape , _UpperCAmelCase ) def _lowerCamelCase ( self ): __a : Tuple = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) __a : Union[str, Any] = shift_tokens_right(_UpperCAmelCase , 1 , 2 ) __a : Tuple = np.equal(_UpperCAmelCase , 1 ).astype(np.floataa ).sum() __a : Union[str, Any] = np.equal(_UpperCAmelCase , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(_UpperCAmelCase , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class __lowercase ( _UpperCamelCase , unittest.TestCase , _UpperCamelCase ): '''simple docstring''' __lowerCAmelCase = True __lowerCAmelCase = ( ( FlaxBlenderbotSmallModel, FlaxBlenderbotSmallForConditionalGeneration, ) if is_flax_available() else () ) __lowerCAmelCase = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else () def _lowerCamelCase ( self ): __a : List[Any] = FlaxBlenderbotSmallModelTester(self ) def _lowerCamelCase ( self ): __a , __a : Tuple = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def _lowerCamelCase ( self ): __a , __a : Optional[Any] = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) def _lowerCamelCase ( self ): __a , __a : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __a : Optional[Any] = self._prepare_for_class(_UpperCAmelCase , 
_UpperCAmelCase ) __a : int = model_class(_UpperCAmelCase ) @jax.jit def encode_jitted(_UpperCAmelCase , _UpperCAmelCase=None , **_UpperCAmelCase ): return model.encode(input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase ) with self.subTest('''JIT Enabled''' ): __a : List[Any] = encode_jitted(**_UpperCAmelCase ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): __a : int = encode_jitted(**_UpperCAmelCase ).to_tuple() self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) ) for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) def _lowerCamelCase ( self ): __a , __a : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __a : Tuple = model_class(_UpperCAmelCase ) __a : Tuple = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] ) __a : Dict = { '''decoder_input_ids''': inputs_dict['''decoder_input_ids'''], '''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''], '''encoder_outputs''': encoder_outputs, } @jax.jit def decode_jitted(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): return model.decode( decoder_input_ids=_UpperCAmelCase , decoder_attention_mask=_UpperCAmelCase , encoder_outputs=_UpperCAmelCase , ) with self.subTest('''JIT Enabled''' ): __a : Any = decode_jitted(**_UpperCAmelCase ).to_tuple() with self.subTest('''JIT Disabled''' ): with jax.disable_jit(): __a : Optional[int] = decode_jitted(**_UpperCAmelCase ).to_tuple() self.assertEqual(len(_UpperCAmelCase ) , len(_UpperCAmelCase ) ) for jitted_output, output in zip(_UpperCAmelCase , _UpperCAmelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def _lowerCamelCase ( self ): for model_class_name in self.all_model_classes: __a : Tuple = model_class_name.from_pretrained('''facebook/blenderbot_small-90M''' ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids __a : Optional[Any] = np.ones((1, 1) ) * model.config.eos_token_id __a : Tuple = model(_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase )
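The shift_tokens_right behavior these tests assert, sketched in plain numpy as a hypothetical stand-in: tokens move one slot right, the decoder start id fills position 0, and the last token falls off the end:

import numpy as np

def shift_tokens_right_demo(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]
    shifted[:, 0] = decoder_start_token_id
    # label-masking positions (-100) become padding, mirroring the Flax helper
    return np.where(shifted == -100, pad_token_id, shifted)

ids = np.array([[71, 82, 18, 33, 2, 1, 1]])
print(shift_tokens_right_demo(ids, 1, 2))   # [[ 2 71 82 18 33  2  1]]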
52
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=A__ )
class _lowercase ( A__ ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE__ : str = field(default='''summarization''' , metadata={'''include_in_asdict_even_if_is_default''': True} )
    SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({'''text''': Value('''string''' )} )
    SCREAMING_SNAKE_CASE__ : ClassVar[Features] = Features({'''summary''': Value('''string''' )} )
    SCREAMING_SNAKE_CASE__ : str = "text"
    SCREAMING_SNAKE_CASE__ : str = "summary"

    @property
    def __magic_name__( self :Union[str, Any] ) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
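A hedged sketch of how a summarization task template is consumed: its column_mapping tells datasets which columns map onto the canonical text/summary schema. The dataset name and the (since-deprecated) prepare_for_task call are assumptions, not taken from this file:

from datasets import load_dataset

ds = load_dataset("cnn_dailymail", "3.0.0", split="train[:100]")
# manual equivalent of what a column_mapping like the one above drives:
ds = ds.rename_columns({"article": "text", "highlights": "summary"})
# a dataset that ships a summarization task template supports roughly:
# ds = ds.prepare_for_task("summarization")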
696
0
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import fa_score, matthews_corrcoef


_snake_case : Union[str, Any] = (
    'This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate '
    'library. You can have a look at this example script for pointers: '
    'https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py'
)


def a_ ( lowerCAmelCase_ : Optional[int], lowerCAmelCase_ : Union[str, Any] ):
    warnings.warn(lowerCAmelCase_, lowerCAmelCase_ )
    requires_backends(lowerCAmelCase_, 'sklearn' )
    return (preds == labels).mean()


def a_ ( lowerCAmelCase_ : str, lowerCAmelCase_ : List[str] ):
    warnings.warn(lowerCAmelCase_, lowerCAmelCase_ )
    requires_backends(lowerCAmelCase_, 'sklearn' )
    __lowerCAmelCase = simple_accuracy(lowerCAmelCase_, lowerCAmelCase_ )
    __lowerCAmelCase = fa_score(y_true=lowerCAmelCase_, y_pred=lowerCAmelCase_ )
    return {
        "acc": acc,
        "f1": fa,
        "acc_and_f1": (acc + fa) / 2,
    }


def a_ ( lowerCAmelCase_ : List[str], lowerCAmelCase_ : Optional[int] ):
    warnings.warn(lowerCAmelCase_, lowerCAmelCase_ )
    requires_backends(lowerCAmelCase_, 'sklearn' )
    __lowerCAmelCase = pearsonr(lowerCAmelCase_, lowerCAmelCase_ )[0]
    __lowerCAmelCase = spearmanr(lowerCAmelCase_, lowerCAmelCase_ )[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def a_ ( lowerCAmelCase_ : List[Any], lowerCAmelCase_ : List[str], lowerCAmelCase_ : Union[str, Any] ):
    warnings.warn(lowerCAmelCase_, lowerCAmelCase_ )
    requires_backends(lowerCAmelCase_, 'sklearn' )
    assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ), F"""Predictions and labels have mismatched lengths {len(lowerCAmelCase_ )} and {len(lowerCAmelCase_ )}"""
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(lowerCAmelCase_, lowerCAmelCase_ )}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(lowerCAmelCase_, lowerCAmelCase_ )}
    elif task_name == "mrpc":
        return acc_and_fa(lowerCAmelCase_, lowerCAmelCase_ )
    elif task_name == "sts-b":
        return pearson_and_spearman(lowerCAmelCase_, lowerCAmelCase_ )
    elif task_name == "qqp":
        return acc_and_fa(lowerCAmelCase_, lowerCAmelCase_ )
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(lowerCAmelCase_, lowerCAmelCase_ )}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(lowerCAmelCase_, lowerCAmelCase_ )}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(lowerCAmelCase_, lowerCAmelCase_ )}
    elif task_name == "rte":
        return {"acc": simple_accuracy(lowerCAmelCase_, lowerCAmelCase_ )}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(lowerCAmelCase_, lowerCAmelCase_ )}
    elif task_name == "hans":
        return {"acc": simple_accuracy(lowerCAmelCase_, lowerCAmelCase_ )}
    else:
        raise KeyError(lowerCAmelCase_ )


def a_ ( lowerCAmelCase_ : Optional[Any], lowerCAmelCase_ : int, lowerCAmelCase_ : Union[str, Any] ):
    warnings.warn(lowerCAmelCase_, lowerCAmelCase_ )
    requires_backends(lowerCAmelCase_, 'sklearn' )
    if len(lowerCAmelCase_ ) != len(lowerCAmelCase_ ):
        raise ValueError(F"""Predictions and labels have mismatched lengths {len(lowerCAmelCase_ )} and {len(lowerCAmelCase_ )}""" )
    if task_name == "xnli":
        return {"acc": simple_accuracy(lowerCAmelCase_, lowerCAmelCase_ )}
    else:
        raise KeyError(lowerCAmelCase_ )
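Every public function in this file is obfuscated to a_; the bodies correspond to simple_accuracy, acc_and_f1, pearson_and_spearman, and the GLUE/XNLI dispatchers from transformers. A tiny sketch of the intended call pattern, written out with readable names:

import numpy as np

preds = np.array([1, 0, 1, 1])
labels = np.array([1, 0, 0, 1])
acc = (preds == labels).mean()   # what simple_accuracy computes: 0.75
print({"acc": acc})
# regression tasks such as sts-b instead report pearsonr/spearmanr
# correlations from scipy.stats, as the third function above does.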
53
def _UpperCamelCase ( lowercase__ = 10**9 ): __SCREAMING_SNAKE_CASE : List[str] = 1 __SCREAMING_SNAKE_CASE : int = 2 __SCREAMING_SNAKE_CASE : Union[str, Any] = 0 __SCREAMING_SNAKE_CASE : Dict = 0 __SCREAMING_SNAKE_CASE : Any = 0 while perimeter <= max_perimeter: perimeters_sum += perimeter prev_value += 2 * value value += prev_value __SCREAMING_SNAKE_CASE : Dict = 2 * value + 2 if i % 2 == 0 else 2 * value - 2 i += 1 return perimeters_sum if __name__ == "__main__": print(f"""{solution() = }""")
696
0
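In the metrics sample above, the import `fa_score` looks like an obfuscated `f1_score` (scikit-learn has no `fa_score`). A minimal sketch of the acc-and-F1 computation with the real scikit-learn names:

    import numpy as np
    from sklearn.metrics import f1_score

    preds = np.array([1, 0, 1, 1, 0])
    labels = np.array([1, 0, 0, 1, 0])
    acc = (preds == labels).mean()              # 0.8
    f1 = f1_score(y_true=labels, y_pred=preds)  # 0.8 (precision 2/3, recall 1.0)
    print({"acc": acc, "f1": f1, "acc_and_f1": (acc + f1) / 2})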
class A : def __init__( self: Dict ) -> Tuple: '''simple docstring''' UpperCAmelCase_ =0 UpperCAmelCase_ =0 UpperCAmelCase_ ={} def lowerCAmelCase__ ( self: Optional[Any] , _lowerCAmelCase: List[str] ) -> List[Any]: '''simple docstring''' if vertex not in self.adjacency: UpperCAmelCase_ ={} self.num_vertices += 1 def lowerCAmelCase__ ( self: int , _lowerCAmelCase: int , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Dict ) -> int: '''simple docstring''' self.add_vertex(_lowerCAmelCase ) self.add_vertex(_lowerCAmelCase ) if head == tail: return UpperCAmelCase_ =weight UpperCAmelCase_ =weight def lowerCAmelCase__ ( self: Tuple ) -> Optional[int]: '''simple docstring''' UpperCAmelCase_ =self.get_edges() for edge in edges: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =edge edges.remove((tail, head, weight) ) for i in range(len(_lowerCAmelCase ) ): UpperCAmelCase_ =list(edges[i] ) edges.sort(key=lambda _lowerCAmelCase : e[2] ) for i in range(len(_lowerCAmelCase ) - 1 ): if edges[i][2] >= edges[i + 1][2]: UpperCAmelCase_ =edges[i][2] + 1 for edge in edges: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =edge UpperCAmelCase_ =weight UpperCAmelCase_ =weight def __str__( self: int ) -> Dict: '''simple docstring''' UpperCAmelCase_ ="" for tail in self.adjacency: for head in self.adjacency[tail]: UpperCAmelCase_ =self.adjacency[head][tail] string += F'{head} -> {tail} == {weight}\n' return string.rstrip("\n" ) def lowerCAmelCase__ ( self: List[str] ) -> Any: '''simple docstring''' UpperCAmelCase_ =[] for tail in self.adjacency: for head in self.adjacency[tail]: output.append((tail, head, self.adjacency[head][tail]) ) return output def lowerCAmelCase__ ( self: int ) -> List[str]: '''simple docstring''' return self.adjacency.keys() @staticmethod def lowerCAmelCase__ ( _lowerCAmelCase: int=None , _lowerCAmelCase: Optional[Any]=None ) -> Tuple: '''simple docstring''' UpperCAmelCase_ =Graph() if vertices is None: UpperCAmelCase_ =[] if edges is None: UpperCAmelCase_ =[] for vertex in vertices: g.add_vertex(_lowerCAmelCase ) for edge in edges: g.add_edge(*_lowerCAmelCase ) return g class A : def __init__( self: Any ) -> Optional[int]: '''simple docstring''' UpperCAmelCase_ ={} UpperCAmelCase_ ={} def __len__( self: int ) -> Optional[Any]: '''simple docstring''' return len(self.parent ) def lowerCAmelCase__ ( self: str , _lowerCAmelCase: Optional[int] ) -> Any: '''simple docstring''' if item in self.parent: return self.find(_lowerCAmelCase ) UpperCAmelCase_ =item UpperCAmelCase_ =0 return item def lowerCAmelCase__ ( self: int , _lowerCAmelCase: Any ) -> Optional[int]: '''simple docstring''' if item not in self.parent: return self.make_set(_lowerCAmelCase ) if item != self.parent[item]: UpperCAmelCase_ =self.find(self.parent[item] ) return self.parent[item] def lowerCAmelCase__ ( self: str , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Dict ) -> str: '''simple docstring''' UpperCAmelCase_ =self.find(_lowerCAmelCase ) UpperCAmelCase_ =self.find(_lowerCAmelCase ) if roota == roota: return roota if self.rank[roota] > self.rank[roota]: UpperCAmelCase_ =roota return roota if self.rank[roota] < self.rank[roota]: UpperCAmelCase_ =roota return roota if self.rank[roota] == self.rank[roota]: self.rank[roota] += 1 UpperCAmelCase_ =roota return roota return None @staticmethod def lowerCAmelCase__ ( _lowerCAmelCase: Union[str, Any] ) -> Any: '''simple docstring''' UpperCAmelCase_ =graph.num_vertices UpperCAmelCase_ =Graph.UnionFind() UpperCAmelCase_ =[] while num_components > 1: UpperCAmelCase_ ={} for vertex 
in graph.get_vertices(): UpperCAmelCase_ =-1 UpperCAmelCase_ =graph.get_edges() for edge in edges: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =edge edges.remove((tail, head, weight) ) for edge in edges: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =edge UpperCAmelCase_ =union_find.find(_lowerCAmelCase ) UpperCAmelCase_ =union_find.find(_lowerCAmelCase ) if seta != seta: if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: UpperCAmelCase_ =[head, tail, weight] if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight: UpperCAmelCase_ =[head, tail, weight] for vertex in cheap_edge: if cheap_edge[vertex] != -1: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ =cheap_edge[vertex] if union_find.find(_lowerCAmelCase ) != union_find.find(_lowerCAmelCase ): union_find.union(_lowerCAmelCase , _lowerCAmelCase ) mst_edges.append(cheap_edge[vertex] ) UpperCAmelCase_ =num_components - 1 UpperCAmelCase_ =Graph.build(edges=_lowerCAmelCase ) return mst
54
def _UpperCamelCase ( lowercase__ , lowercase__ ): __SCREAMING_SNAKE_CASE : str = len(lowercase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = len(lowercase__ ) __SCREAMING_SNAKE_CASE : List[str] = [[False for _ in range(m + 1 )] for _ in range(n + 1 )] __SCREAMING_SNAKE_CASE : str = True for i in range(lowercase__ ): for j in range(m + 1 ): if dp[i][j]: if j < m and a[i].upper() == b[j]: __SCREAMING_SNAKE_CASE : Union[str, Any] = True if a[i].islower(): __SCREAMING_SNAKE_CASE : Union[str, Any] = True return dp[n][m] if __name__ == "__main__": import doctest doctest.testmod()
696
0
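The graph class in the record above implements Boruvka's minimum-spanning-tree algorithm over an adjacency dict, with a union-find tracking components. A compact, self-contained sketch of the same idea, independent of the obfuscated names above (assumes a connected graph with distinct edge weights, the standard Boruvka caveat):

    def boruvka(n: int, edges: list[tuple[int, int, int]]) -> list[tuple[int, int, int]]:
        # Repeatedly add each component's cheapest outgoing edge until one component remains.
        parent = list(range(n))

        def find(x: int) -> int:
            while parent[x] != x:
                parent[x] = parent[parent[x]]  # path halving
                x = parent[x]
            return x

        mst, components = [], n
        while components > 1:
            cheapest = [None] * n
            for u, v, w in edges:
                ru, rv = find(u), find(v)
                if ru != rv:
                    if cheapest[ru] is None or w < cheapest[ru][2]:
                        cheapest[ru] = (u, v, w)
                    if cheapest[rv] is None or w < cheapest[rv][2]:
                        cheapest[rv] = (u, v, w)
            for edge in cheapest:
                if edge is not None:
                    u, v, w = edge
                    ru, rv = find(u), find(v)
                    if ru != rv:  # the same edge may be cheapest for both endpoints
                        parent[ru] = rv
                        mst.append(edge)
                        components -= 1
        return mst

    print(boruvka(4, [(0, 1, 1), (1, 2, 2), (2, 3, 3), (0, 3, 4)]))
    # [(0, 1, 1), (1, 2, 2), (2, 3, 3)] -- total weight 6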
from .imports import is_rich_available if is_rich_available(): from rich.traceback import install install(show_locals=False) else: raise ModuleNotFoundError('To use the rich extension, install rich with `pip install rich`')
55
from scipy.stats import pearsonr import datasets __lowerCAmelCase : str ='\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n' __lowerCAmelCase : Tuple ='\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n' __lowerCAmelCase : Optional[int] ='\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. 
and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _lowercase ( datasets.Metric ): '''simple docstring''' def __magic_name__( self :Optional[int] ) -> Optional[int]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float''' ), '''references''': datasets.Value('''float''' ), } ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def __magic_name__( self :Tuple , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple=False ) -> int: if return_pvalue: __SCREAMING_SNAKE_CASE : int = pearsonr(lowerCAmelCase__ , lowerCAmelCase__ ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(lowerCAmelCase__ , lowerCAmelCase__ )[0] )}
696
0
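Underneath the datasets.Metric wrapper in the record above, the computation is a single scipy call; the docstring's own example values reproduce directly:

    from scipy.stats import pearsonr

    r, p = pearsonr([10, 9, 2.5, 6, 4], [1, 2, 3, 4, 5])
    print(round(r, 2), round(p, 2))  # -0.74 0.15, matching the docstring example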
'''simple docstring''' from collections.abc import Generator from math import sin def _a (lowercase__ : bytes ) -> bytes: """simple docstring""" if len(lowercase__ ) != 3_2: raise ValueError('Input must be of length 32' ) __snake_case = B'' for i in [3, 2, 1, 0]: little_endian += string_aa[8 * i : 8 * i + 8] return little_endian def _a (lowercase__ : int ) -> bytes: """simple docstring""" if i < 0: raise ValueError('Input must be non-negative' ) __snake_case = format(lowercase__ , '08x' )[-8:] __snake_case = B'' for i in [3, 2, 1, 0]: little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode('utf-8' ) return little_endian_hex def _a (lowercase__ : bytes ) -> bytes: """simple docstring""" __snake_case = B'' for char in message: bit_string += format(lowercase__ , '08b' ).encode('utf-8' ) __snake_case = format(len(lowercase__ ) , '064b' ).encode('utf-8' ) # Pad bit_string to a multiple of 512 chars bit_string += b"1" while len(lowercase__ ) % 5_1_2 != 4_4_8: bit_string += b"0" bit_string += to_little_endian(start_len[3_2:] ) + to_little_endian(start_len[:3_2] ) return bit_string def _a (lowercase__ : bytes ) -> Generator[list[int], None, None]: """simple docstring""" if len(lowercase__ ) % 5_1_2 != 0: raise ValueError('Input must have length that\'s a multiple of 512' ) for pos in range(0 , len(lowercase__ ) , 5_1_2 ): __snake_case = bit_string[pos : pos + 5_1_2] __snake_case = [] for i in range(0 , 5_1_2 , 3_2 ): block_words.append(int(to_little_endian(block[i : i + 3_2] ) , 2 ) ) yield block_words def _a (lowercase__ : int ) -> int: """simple docstring""" if i < 0: raise ValueError('Input must be non-negative' ) __snake_case = format(lowercase__ , '032b' ) __snake_case = '' for c in i_str: new_str += "1" if c == "0" else "0" return int(lowercase__ , 2 ) def _a (lowercase__ : int , lowercase__ : int ) -> int: """simple docstring""" return (a + b) % 2**3_2 def _a (lowercase__ : int , lowercase__ : int ) -> int: """simple docstring""" if i < 0: raise ValueError('Input must be non-negative' ) if shift < 0: raise ValueError('Shift must be non-negative' ) return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2 def _a (lowercase__ : bytes ) -> bytes: """simple docstring""" __snake_case = preprocess(lowercase__ ) __snake_case = [int(2**3_2 * abs(sin(i + 1 ) ) ) for i in range(6_4 )] # Starting states __snake_case = 0x6_7_4_5_2_3_0_1 __snake_case = 0xE_F_C_D_A_B_8_9 __snake_case = 0x9_8_B_A_D_C_F_E __snake_case = 0x1_0_3_2_5_4_7_6 __snake_case = [ 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, ] # Process bit string in chunks, each with 16 32-char words for block_words in get_block_words(lowercase__ ): __snake_case = aa __snake_case = ba __snake_case = ca __snake_case = da # Hash current chunk for i in range(6_4 ): if i <= 1_5: # f = (b & c) | (not_32(b) & d) # Alternate definition for f __snake_case = d ^ (b & (c ^ d)) __snake_case = i elif i <= 3_1: # f = (d & b) | (not_32(d) & c) # Alternate definition for f __snake_case = c ^ (d & (b ^ c)) __snake_case = (5 * i + 1) % 1_6 elif i <= 4_7: __snake_case = b ^ c ^ d __snake_case = (3 * i + 5) % 1_6 else: __snake_case = c ^ (b | not_aa(lowercase__ )) __snake_case = (7 * i) % 1_6 __snake_case = (f + a + added_consts[i] + block_words[g]) % 2**3_2 __snake_case = d __snake_case = c __snake_case = b __snake_case = sum_aa(lowercase__ 
, left_rotate_aa(lowercase__ , shift_amounts[i] ) ) # Add hashed chunk to running total __snake_case = sum_aa(lowercase__ , lowercase__ ) __snake_case = sum_aa(lowercase__ , lowercase__ ) __snake_case = sum_aa(lowercase__ , lowercase__ ) __snake_case = sum_aa(lowercase__ , lowercase__ ) __snake_case = reformat_hex(lowercase__ ) + reformat_hex(lowercase__ ) + reformat_hex(lowercase__ ) + reformat_hex(lowercase__ ) return digest if __name__ == "__main__": import doctest doctest.testmod()
56
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mobilebert import MobileBertTokenizer __lowerCAmelCase : List[str] =logging.get_logger(__name__) __lowerCAmelCase : int ={'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} __lowerCAmelCase : int ={ 'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'}, 'tokenizer_file': { 'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json' }, } __lowerCAmelCase : Optional[int] ={'mobilebert-uncased': 5_1_2} __lowerCAmelCase : Union[str, Any] ={} class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ : List[Any] = MobileBertTokenizer def __init__( self :Tuple , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]="[UNK]" , lowerCAmelCase__ :List[Any]="[SEP]" , lowerCAmelCase__ :List[Any]="[PAD]" , lowerCAmelCase__ :List[Any]="[CLS]" , lowerCAmelCase__ :Any="[MASK]" , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Tuple=None , **lowerCAmelCase__ :List[str] , ) -> Optional[Any]: super().__init__( lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , ) __SCREAMING_SNAKE_CASE : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , lowerCAmelCase__ ) != do_lower_case or normalizer_state.get('''strip_accents''' , lowerCAmelCase__ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase__ ) != tokenize_chinese_chars ): __SCREAMING_SNAKE_CASE : int = getattr(lowerCAmelCase__ , normalizer_state.pop('''type''' ) ) __SCREAMING_SNAKE_CASE : Optional[int] = do_lower_case __SCREAMING_SNAKE_CASE : str = strip_accents __SCREAMING_SNAKE_CASE : Dict = tokenize_chinese_chars __SCREAMING_SNAKE_CASE : Union[str, Any] = normalizer_class(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Any = do_lower_case def __magic_name__( self :Optional[int] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any]=None ) -> Tuple: __SCREAMING_SNAKE_CASE : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __magic_name__( self :List[str] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]: __SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id] __SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]: __SCREAMING_SNAKE_CASE : int = self._tokenizer.model.save(lowerCAmelCase__ , 
name=lowerCAmelCase__ ) return tuple(lowerCAmelCase__ )
696
0
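The hand-rolled MD5 in the record above is not runnable as printed: the obfuscation collapsed every helper's name to `_a` while the call sites kept the original names such as `to_little_endian` and `not_aa`. Any repaired version can be sanity-checked against the standard library:

    import hashlib

    # A correct MD5 reimplementation must reproduce this digest for the same input.
    print(hashlib.md5(b"hello world").hexdigest())
    # 5eb63bbbe01eeed093cb22bb8f5acdc3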
from __future__ import annotations def snake_case (UpperCAmelCase__ ) -> float: if not UpperCAmelCase__: raise ValueError('List is empty' ) return sum(UpperCAmelCase__ ) / len(UpperCAmelCase__ ) if __name__ == "__main__": import doctest doctest.testmod()
57
import os def _UpperCamelCase ( lowercase__ ): __SCREAMING_SNAKE_CASE : Optional[Any] = len(grid[0] ) __SCREAMING_SNAKE_CASE : str = len(lowercase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = 0 __SCREAMING_SNAKE_CASE : Any = 0 __SCREAMING_SNAKE_CASE : Dict = 0 # Check vertically, horizontally, diagonally at the same time (only works # for nxn grid) for i in range(lowercase__ ): for j in range(n_rows - 3 ): __SCREAMING_SNAKE_CASE : Union[str, Any] = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i] __SCREAMING_SNAKE_CASE : Union[str, Any] = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3] # Left-to-right diagonal (\) product if i < n_columns - 3: __SCREAMING_SNAKE_CASE : Any = ( grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3] ) # Right-to-left diagonal(/) product if i > 2: __SCREAMING_SNAKE_CASE : Any = ( grid[i][j] * grid[i - 1][j + 1] * grid[i - 2][j + 2] * grid[i - 3][j + 3] ) __SCREAMING_SNAKE_CASE : Optional[int] = max( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) if max_product > largest: __SCREAMING_SNAKE_CASE : Tuple = max_product return largest def _UpperCamelCase ( ): __SCREAMING_SNAKE_CASE : Optional[int] = [] with open(os.path.dirname(lowercase__ ) + '''/grid.txt''' ) as file: for line in file: grid.append(line.strip('''\n''' ).split(''' ''' ) ) __SCREAMING_SNAKE_CASE : str = [[int(lowercase__ ) for i in grid[j]] for j in range(len(lowercase__ ) )] return largest_product(lowercase__ ) if __name__ == "__main__": print(solution())
696
0
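The grid scan above generalizes Project Euler problem 11 (greatest product of four adjacent numbers in a line). A minimal sketch of the same scan with run length 2 on a small grid, diagonals omitted for brevity:

    grid = [
        [1, 2, 3, 4],
        [5, 6, 7, 8],
        [9, 10, 11, 12],
        [13, 14, 15, 16],
    ]
    n, best = len(grid), 0
    for i in range(n):
        for j in range(n - 1):
            best = max(
                best,
                grid[i][j] * grid[i][j + 1],  # horizontal pair
                grid[j][i] * grid[j + 1][i],  # vertical pair
            )
    print(best)  # 240 == 15 * 16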
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __lowerCAmelCase : List[str] = { '''configuration_mobilevit''': ['''MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MobileViTConfig''', '''MobileViTOnnxConfig'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : str = ['''MobileViTFeatureExtractor'''] __lowerCAmelCase : Optional[int] = ['''MobileViTImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[int] = [ '''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MobileViTForImageClassification''', '''MobileViTForSemanticSegmentation''', '''MobileViTModel''', '''MobileViTPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Union[str, Any] = [ '''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFMobileViTForImageClassification''', '''TFMobileViTForSemanticSegmentation''', '''TFMobileViTModel''', '''TFMobileViTPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys __lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
58
import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class _lowercase ( A__ ): '''simple docstring''' def __magic_name__( self :List[Any] ) -> Any: __SCREAMING_SNAKE_CASE : List[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''tf_padding''' ) ) self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''depth_multiplier''' ) ) class _lowercase : '''simple docstring''' def __init__( self :List[str] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any]=13 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Optional[Any]=32 , lowerCAmelCase__ :Dict=0.25 , lowerCAmelCase__ :Optional[int]=8 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Union[str, Any]=1_024 , lowerCAmelCase__ :Any=32 , lowerCAmelCase__ :Tuple="relu6" , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Dict=0.02 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :int=True , lowerCAmelCase__ :int=10 , lowerCAmelCase__ :Union[str, Any]=None , ) -> str: __SCREAMING_SNAKE_CASE : Any = parent __SCREAMING_SNAKE_CASE : Dict = batch_size __SCREAMING_SNAKE_CASE : List[Any] = num_channels __SCREAMING_SNAKE_CASE : Union[str, Any] = image_size __SCREAMING_SNAKE_CASE : Optional[int] = depth_multiplier __SCREAMING_SNAKE_CASE : Dict = min_depth __SCREAMING_SNAKE_CASE : List[str] = tf_padding __SCREAMING_SNAKE_CASE : List[Any] = int(last_hidden_size * depth_multiplier ) __SCREAMING_SNAKE_CASE : List[str] = output_stride __SCREAMING_SNAKE_CASE : Any = hidden_act __SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_dropout_prob __SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels __SCREAMING_SNAKE_CASE : Union[str, Any] = is_training __SCREAMING_SNAKE_CASE : Optional[int] = num_labels __SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range __SCREAMING_SNAKE_CASE : Optional[int] = scope def __magic_name__( self :List[str] ) -> int: __SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = None __SCREAMING_SNAKE_CASE : Optional[Any] = None if self.use_labels: __SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.num_labels ) __SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __SCREAMING_SNAKE_CASE : Any = self.get_config() return config, pixel_values, labels, pixel_labels def __magic_name__( self :Union[str, Any] ) -> Optional[Any]: return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def __magic_name__( self :List[Any] , 
lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str] ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Dict = MobileNetVaModel(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def __magic_name__( self :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]: __SCREAMING_SNAKE_CASE : Tuple = self.num_labels __SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaForImageClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __magic_name__( self :List[Any] ) -> Tuple: __SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = config_and_inputs __SCREAMING_SNAKE_CASE : Dict = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _lowercase ( A__ , A__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else () SCREAMING_SNAKE_CASE__ : Optional[Any] = ( {'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ : Any = False SCREAMING_SNAKE_CASE__ : Any = False SCREAMING_SNAKE_CASE__ : str = False SCREAMING_SNAKE_CASE__ : Tuple = False def __magic_name__( self :Any ) -> Dict: __SCREAMING_SNAKE_CASE : List[str] = MobileNetVaModelTester(self ) __SCREAMING_SNAKE_CASE : Optional[int] = MobileNetVaConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ ) def __magic_name__( self :List[Any] ) -> int: self.config_tester.run_common_tests() @unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' ) def __magic_name__( self :Dict ) -> Optional[Any]: pass @unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' ) def __magic_name__( self :List[Any] ) -> List[Any]: pass @unittest.skip(reason='''MobileNetV1 does not output attentions''' ) def __magic_name__( self :Any ) -> Dict: pass def __magic_name__( self :Any ) -> List[Any]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE : Any = model_class(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __SCREAMING_SNAKE_CASE : Union[str, Any] = [*signature.parameters.keys()] __SCREAMING_SNAKE_CASE : List[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowerCAmelCase__ ) def __magic_name__( self :Any ) -> Tuple: __SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def __magic_name__( self :Union[str, Any] ) -> Tuple: def check_hidden_states_output(lowerCAmelCase__ :Union[str, Any] , 
lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] ): __SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): __SCREAMING_SNAKE_CASE : Optional[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) __SCREAMING_SNAKE_CASE : Optional[Any] = outputs.hidden_states __SCREAMING_SNAKE_CASE : Optional[int] = 26 self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE : str = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __SCREAMING_SNAKE_CASE : List[Any] = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def __magic_name__( self :List[Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ ) @slow def __magic_name__( self :List[str] ) -> List[Any]: for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) def _UpperCamelCase ( ): __SCREAMING_SNAKE_CASE : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class _lowercase ( unittest.TestCase ): '''simple docstring''' @cached_property def __magic_name__( self :Optional[int] ) -> Union[str, Any]: return ( MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None ) @slow def __magic_name__( self :Tuple ) -> Optional[int]: __SCREAMING_SNAKE_CASE : List[str] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = self.default_image_processor __SCREAMING_SNAKE_CASE : int = prepare_img() __SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ ) # forward pass with torch.no_grad(): __SCREAMING_SNAKE_CASE : int = model(**lowerCAmelCase__ ) # verify the logits __SCREAMING_SNAKE_CASE : Any = torch.Size((1, 1_001) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(lowerCAmelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
696
0
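One detail worth noting in the model tester above: the expected final hidden size is derived as int(last_hidden_size * depth_multiplier). A sketch of that MobileNet-style width scaling, with a min_depth floor applied (the clamp is my reading of what the tester's min_depth parameter is for, not code taken from the record):

    def scaled_channels(base: int, depth_multiplier: float, min_depth: int = 8) -> int:
        # Channel count after applying a width/depth multiplier, floored at min_depth.
        return max(min_depth, int(base * depth_multiplier))

    print(scaled_channels(1024, 0.25))   # 256, matching the tester's defaults
    print(scaled_channels(1024, 0.005))  # 8, clamped to min_depth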
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __A = { "configuration_swiftformer": [ "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "SwiftFormerConfig", "SwiftFormerOnnxConfig", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A = [ "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "SwiftFormerForImageClassification", "SwiftFormerModel", "SwiftFormerPreTrainedModel", ] if TYPE_CHECKING: from .configuration_swiftformer import ( SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, SwiftFormerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swiftformer import ( SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, ) else: import sys __A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
59
import os from datetime import datetime as dt from github import Github __lowerCAmelCase : List[Any] =[ 'good first issue', 'good second issue', 'good difficult issue', 'enhancement', 'new pipeline/model', 'new scheduler', 'wip', ] def _UpperCamelCase ( ): __SCREAMING_SNAKE_CASE : Tuple = Github(os.environ['''GITHUB_TOKEN'''] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = g.get_repo('''huggingface/diffusers''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = repo.get_issues(state='''open''' ) for issue in open_issues: __SCREAMING_SNAKE_CASE : Optional[int] = sorted(issue.get_comments() , key=lambda lowercase__ : i.created_at , reverse=lowercase__ ) __SCREAMING_SNAKE_CASE : List[Any] = comments[0] if len(lowercase__ ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state='''closed''' ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state='''open''' ) issue.remove_from_labels('''stale''' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( '''This issue has been automatically marked as stale because it has not had ''' '''recent activity. If you think this still needs to be addressed ''' '''please comment on this thread.\n\nPlease note that issues that do not follow the ''' '''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) ''' '''are likely to be ignored.''' ) issue.add_to_labels('''stale''' ) if __name__ == "__main__": main()
696
0
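The stale-bot script above gates its actions on simple day arithmetic against updated_at and created_at. A toy sketch of the inactivity test at the heart of its close branch:

    from datetime import datetime, timedelta

    now = datetime.utcnow()
    updated_at = now - timedelta(days=10)
    created_at = now - timedelta(days=45)
    stale = (now - updated_at).days > 7 and (now - created_at).days >= 30
    print(stale)  # True -> this issue would be closed by the script above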
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.utils import ComputeEnvironment from .cluster import get_cluster_input from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401 from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401 from .sagemaker import get_sagemaker_input lowerCAmelCase_ = '''Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine''' def lowerCamelCase_ ( ) -> int: """simple docstring""" snake_case_ : Optional[Any] = _ask_options( '''In which compute environment are you running?''' , ['''This machine''', '''AWS (Amazon SageMaker)'''] , _convert_compute_environment , ) if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER: snake_case_ : Union[str, Any] = get_sagemaker_input() else: snake_case_ : Union[str, Any] = get_cluster_input() return config def lowerCamelCase_ ( _UpperCamelCase=None ) -> Optional[int]: """simple docstring""" if subparsers is not None: snake_case_ : Tuple = subparsers.add_parser('''config''' , description=_UpperCamelCase ) else: snake_case_ : Tuple = argparse.ArgumentParser('''Accelerate config command''' , description=_UpperCamelCase ) parser.add_argument( '''--config_file''' , default=_UpperCamelCase , help=( '''The path to use to store the config file. Will default to a file named default_config.yaml in the cache ''' '''location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have ''' '''such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed ''' '''with \'huggingface\'.''' ) , ) if subparsers is not None: parser.set_defaults(func=_UpperCamelCase ) return parser def lowerCamelCase_ ( _UpperCamelCase ) -> str: """simple docstring""" snake_case_ : Any = get_user_input() if args.config_file is not None: snake_case_ : Tuple = args.config_file else: if not os.path.isdir(_UpperCamelCase ): os.makedirs(_UpperCamelCase ) snake_case_ : Tuple = default_yaml_config_file if config_file.endswith('''.json''' ): config.to_json_file(_UpperCamelCase ) else: config.to_yaml_file(_UpperCamelCase ) print(f'''accelerate configuration saved at {config_file}''' ) def lowerCamelCase_ ( ) -> List[Any]: """simple docstring""" snake_case_ : Union[str, Any] = config_command_parser() snake_case_ : Any = parser.parse_args() config_command(_UpperCamelCase ) if __name__ == "__main__": main()
60
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Dict =logging.get_logger(__name__) __lowerCAmelCase : Dict ={ 'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json', # See all CANINE models at https://huggingface.co/models?filter=canine } class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = '''canine''' def __init__( self :Any , lowerCAmelCase__ :List[Any]=768 , lowerCAmelCase__ :Any=12 , lowerCAmelCase__ :str=12 , lowerCAmelCase__ :Optional[int]=3_072 , lowerCAmelCase__ :str="gelu" , lowerCAmelCase__ :Union[str, Any]=0.1 , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :int=16_384 , lowerCAmelCase__ :Tuple=16 , lowerCAmelCase__ :List[Any]=0.02 , lowerCAmelCase__ :int=1E-1_2 , lowerCAmelCase__ :int=0 , lowerCAmelCase__ :List[Any]=0xe000 , lowerCAmelCase__ :List[str]=0xe001 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :Union[str, Any]=8 , lowerCAmelCase__ :Optional[int]=16_384 , lowerCAmelCase__ :Any=128 , **lowerCAmelCase__ :Optional[Any] , ) -> Optional[Any]: super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings __SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size __SCREAMING_SNAKE_CASE : str = num_hidden_layers __SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads __SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size __SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act __SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob __SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : Dict = initializer_range __SCREAMING_SNAKE_CASE : int = type_vocab_size __SCREAMING_SNAKE_CASE : List[Any] = layer_norm_eps # Character config: __SCREAMING_SNAKE_CASE : Tuple = downsampling_rate __SCREAMING_SNAKE_CASE : Optional[Any] = upsampling_kernel_size __SCREAMING_SNAKE_CASE : Any = num_hash_functions __SCREAMING_SNAKE_CASE : Optional[int] = num_hash_buckets __SCREAMING_SNAKE_CASE : List[str] = local_transformer_stride
696
0
# Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def _A ( lowerCAmelCase_ : str , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int ): """simple docstring""" lowerCAmelCase__ = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLUE scores as follows: # "pair": [fairseq, transformers] lowerCAmelCase__ = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } lowerCAmelCase__ = F'{src_lang}-{tgt_lang}' lowerCAmelCase__ = F'\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = "facebook/wmt19-{src_lang}-{tgt_lang}"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = "{texts[src_lang]}"\ninput_ids = tokenizer.encode(input, return_tensors="pt")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. 
For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn\'t support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n' os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ ) lowerCAmelCase__ = os.path.join(lowerCAmelCase_ , "README.md" ) print(F'Generating {path}' ) with open(lowerCAmelCase_ , "w" , encoding="utf-8" ) as f: f.write(lowerCAmelCase_ ) # make sure we are under the root of the project UpperCamelCase = Path(__file__).resolve().parent.parent.parent UpperCamelCase = repo_dir / 'model_cards' for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: UpperCamelCase , UpperCamelCase , UpperCamelCase = model_name.split('-') UpperCamelCase = model_cards_dir / 'facebook' / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
61
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : List[Any] =logging.get_logger(__name__) __lowerCAmelCase : Tuple ={ 'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json', } class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = '''transfo-xl''' SCREAMING_SNAKE_CASE__ : List[str] = ['''mems'''] SCREAMING_SNAKE_CASE__ : List[Any] = { '''n_token''': '''vocab_size''', '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self :str , lowerCAmelCase__ :Optional[int]=267_735 , lowerCAmelCase__ :Optional[int]=[20_000, 40_000, 200_000] , lowerCAmelCase__ :List[Any]=1_024 , lowerCAmelCase__ :List[str]=1_024 , lowerCAmelCase__ :Any=16 , lowerCAmelCase__ :Tuple=64 , lowerCAmelCase__ :Union[str, Any]=4_096 , lowerCAmelCase__ :Dict=4 , lowerCAmelCase__ :Optional[Any]=False , lowerCAmelCase__ :Dict=18 , lowerCAmelCase__ :Union[str, Any]=1_600 , lowerCAmelCase__ :Union[str, Any]=1_000 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Optional[Any]=0 , lowerCAmelCase__ :Union[str, Any]=-1 , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :str=0.0 , lowerCAmelCase__ :int=True , lowerCAmelCase__ :str="normal" , lowerCAmelCase__ :Tuple=0.01 , lowerCAmelCase__ :Union[str, Any]=0.01 , lowerCAmelCase__ :str=0.02 , lowerCAmelCase__ :Optional[Any]=1E-5 , lowerCAmelCase__ :Union[str, Any]=0 , **lowerCAmelCase__ :Optional[Any] , ) -> str: __SCREAMING_SNAKE_CASE : str = vocab_size __SCREAMING_SNAKE_CASE : Tuple = [] self.cutoffs.extend(lowerCAmelCase__ ) if proj_share_all_but_first: __SCREAMING_SNAKE_CASE : List[str] = [False] + [True] * len(self.cutoffs ) else: __SCREAMING_SNAKE_CASE : Tuple = [False] + [False] * len(self.cutoffs ) __SCREAMING_SNAKE_CASE : Union[str, Any] = d_model __SCREAMING_SNAKE_CASE : Union[str, Any] = d_embed __SCREAMING_SNAKE_CASE : Tuple = d_head __SCREAMING_SNAKE_CASE : Dict = d_inner __SCREAMING_SNAKE_CASE : Optional[Any] = div_val __SCREAMING_SNAKE_CASE : Optional[Any] = pre_lnorm __SCREAMING_SNAKE_CASE : List[str] = n_layer __SCREAMING_SNAKE_CASE : int = n_head __SCREAMING_SNAKE_CASE : str = mem_len __SCREAMING_SNAKE_CASE : Union[str, Any] = same_length __SCREAMING_SNAKE_CASE : str = attn_type __SCREAMING_SNAKE_CASE : Dict = clamp_len __SCREAMING_SNAKE_CASE : Tuple = sample_softmax __SCREAMING_SNAKE_CASE : Optional[int] = adaptive __SCREAMING_SNAKE_CASE : int = dropout __SCREAMING_SNAKE_CASE : Optional[Any] = dropatt __SCREAMING_SNAKE_CASE : int = untie_r __SCREAMING_SNAKE_CASE : Optional[int] = init __SCREAMING_SNAKE_CASE : List[str] = init_range __SCREAMING_SNAKE_CASE : Any = proj_init_std __SCREAMING_SNAKE_CASE : List[str] = init_std __SCREAMING_SNAKE_CASE : Tuple = layer_norm_epsilon super().__init__(eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) @property def __magic_name__( self :str ) -> int: # Message copied from Transformer-XL documentation logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def __magic_name__( self :Tuple , lowerCAmelCase__ :int ) -> Dict: # Message copied from Transformer-XL documentation raise NotImplementedError( f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
696
0
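In the Transformer-XL config above, proj_share_all_but_first expands into a per-cluster tie pattern for the adaptive-softmax projections; the resulting list is easy to verify by hand:

    cutoffs = [20000, 40000, 200000]  # the config's default cutoffs
    tie_projs = [False] + [True] * len(cutoffs)  # proj_share_all_but_first=True
    print(tie_projs)  # [False, True, True, True]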
from __future__ import annotations snake_case = [ [-1, 0], # left [0, -1], # down [1, 0], # right [0, 1], # up ] def lowerCamelCase__ ( lowercase , lowercase , lowercase , lowercase , lowercase , ): """simple docstring""" SCREAMING_SNAKE_CASE : int = [ [0 for col in range(len(grid[0] ) )] for row in range(len(lowercase ) ) ] # the reference grid SCREAMING_SNAKE_CASE : Any = 1 SCREAMING_SNAKE_CASE : int = [ [0 for col in range(len(grid[0] ) )] for row in range(len(lowercase ) ) ] # the action grid SCREAMING_SNAKE_CASE : Tuple = init[0] SCREAMING_SNAKE_CASE : str = init[1] SCREAMING_SNAKE_CASE : Union[str, Any] = 0 SCREAMING_SNAKE_CASE : Tuple = g + heuristic[x][y] # cost from starting cell to destination cell SCREAMING_SNAKE_CASE : str = [[f, g, x, y]] SCREAMING_SNAKE_CASE : Any = False # flag that is set when search is complete SCREAMING_SNAKE_CASE : List[Any] = False # flag set if we can't find expand while not found and not resign: if len(lowercase ) == 0: raise ValueError("Algorithm is unable to find solution" ) else: # to choose the least costliest action so as to move closer to the goal cell.sort() cell.reverse() SCREAMING_SNAKE_CASE : Dict = cell.pop() SCREAMING_SNAKE_CASE : str = next_cell[2] SCREAMING_SNAKE_CASE : Dict = next_cell[3] SCREAMING_SNAKE_CASE : Tuple = next_cell[1] if x == goal[0] and y == goal[1]: SCREAMING_SNAKE_CASE : Optional[Any] = True else: for i in range(len(lowercase ) ): # to try out different valid actions SCREAMING_SNAKE_CASE : Optional[Any] = x + DIRECTIONS[i][0] SCREAMING_SNAKE_CASE : str = y + DIRECTIONS[i][1] if xa >= 0 and xa < len(lowercase ) and ya >= 0 and ya < len(grid[0] ): if closed[xa][ya] == 0 and grid[xa][ya] == 0: SCREAMING_SNAKE_CASE : List[Any] = g + cost SCREAMING_SNAKE_CASE : Dict = ga + heuristic[xa][ya] cell.append([fa, ga, xa, ya] ) SCREAMING_SNAKE_CASE : Optional[int] = 1 SCREAMING_SNAKE_CASE : Tuple = i SCREAMING_SNAKE_CASE : Any = [] SCREAMING_SNAKE_CASE : Optional[Any] = goal[0] SCREAMING_SNAKE_CASE : Any = goal[1] invpath.append([x, y] ) # we get the reverse path from here while x != init[0] or y != init[1]: SCREAMING_SNAKE_CASE : List[str] = x - DIRECTIONS[action[x][y]][0] SCREAMING_SNAKE_CASE : Optional[int] = y - DIRECTIONS[action[x][y]][1] SCREAMING_SNAKE_CASE : int = xa SCREAMING_SNAKE_CASE : Optional[Any] = ya invpath.append([x, y] ) SCREAMING_SNAKE_CASE : Any = [] for i in range(len(lowercase ) ): path.append(invpath[len(lowercase ) - 1 - i] ) return path, action if __name__ == "__main__": snake_case = [ [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 1, 0, 0, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0], ] snake_case = [0, 0] # all coordinates are given in format [y,x] snake_case = [len(grid) - 1, len(grid[0]) - 1] snake_case = 1 # the cost map which pushes the path closer to the goal snake_case = [[0 for row in range(len(grid[0]))] for col in range(len(grid))] for i in range(len(grid)): for j in range(len(grid[0])): snake_case = abs(i - goal[0]) + abs(j - goal[1]) if grid[i][j] == 1: # added extra penalty in the heuristic map snake_case = 99 snake_case , snake_case = search(grid, init, goal, cost, heuristic) print("""ACTION MAP""") for i in range(len(action)): print(action[i]) for i in range(len(path)): print(path[i])
62
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : str =logging.get_logger(__name__) __lowerCAmelCase : Any ={ # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = '''megatron-bert''' def __init__( self :int , lowerCAmelCase__ :int=29_056 , lowerCAmelCase__ :Dict=1_024 , lowerCAmelCase__ :Optional[int]=24 , lowerCAmelCase__ :str=16 , lowerCAmelCase__ :Optional[int]=4_096 , lowerCAmelCase__ :Optional[Any]="gelu" , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :List[str]=512 , lowerCAmelCase__ :Any=2 , lowerCAmelCase__ :int=0.02 , lowerCAmelCase__ :Tuple=1E-1_2 , lowerCAmelCase__ :Tuple=0 , lowerCAmelCase__ :Optional[int]="absolute" , lowerCAmelCase__ :List[str]=True , **lowerCAmelCase__ :Tuple , ) -> Optional[Any]: super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = vocab_size __SCREAMING_SNAKE_CASE : List[str] = hidden_size __SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers __SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads __SCREAMING_SNAKE_CASE : Tuple = hidden_act __SCREAMING_SNAKE_CASE : Any = intermediate_size __SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob __SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings __SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size __SCREAMING_SNAKE_CASE : str = initializer_range __SCREAMING_SNAKE_CASE : Dict = layer_norm_eps __SCREAMING_SNAKE_CASE : Dict = position_embedding_type __SCREAMING_SNAKE_CASE : Union[str, Any] = use_cache
696
0
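The heuristic map built in the __main__ block of the A* sample above is plain Manhattan distance (plus a fixed penalty on obstacle cells). As a standalone helper:

    def manhattan(cell: tuple[int, int], goal: tuple[int, int]) -> int:
        # Admissible A* heuristic for 4-directional moves with unit step cost.
        return abs(cell[0] - goal[0]) + abs(cell[1] - goal[1])

    print(manhattan((0, 0), (4, 5)))  # 9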
from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def lowerCamelCase__ ( ): __UpperCAmelCase : int = { """repo_name""": ["""test_repo1""", """test_repo2""", """test_repo3"""], """path""": ["""test_1.py""", """test_2.py""", """unit_test.py"""], """content""": ["""a """ * 20, """a """ * 30, """b """ * 7], } __UpperCAmelCase : Dict = Dataset.from_dict(__lowerCamelCase ) return dataset class a ( lowercase__ ): """simple docstring""" def UpperCAmelCase ( self : Tuple ) -> List[Any]: __UpperCAmelCase : Optional[Any] = get_dataset() __UpperCAmelCase : Dict = make_duplicate_clusters(__lowercase , 0.85 ) self.assertEqual(len(duplicate_clusters[0] ) , 2 ) def UpperCAmelCase ( self : Optional[Any] ) -> Any: __UpperCAmelCase : int = get_dataset() __UpperCAmelCase , __UpperCAmelCase : Dict = deduplicate_dataset(__lowercase ) self.assertEqual(len(__lowercase ) , 2 ) print(__lowercase ) self.assertEqual(duplicate_clusters[0][0]["""copies"""] , 2 ) self.assertEqual(duplicate_clusters[0][0]["""is_extreme"""] , __lowercase )
63
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")


DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
696
0
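For background on what the MinHash deduplication test above exercises, here is a small sketch of Jaccard-similarity estimation with the `datasketch` library; the `MinHash` API and the whitespace tokenization are assumptions of this sketch, not part of the row's code:

from datasketch import MinHash

def minhash_of(text, num_perm=128):
    m = MinHash(num_perm=num_perm)
    for token in text.split():
        m.update(token.encode("utf8"))
    return m

doc_a = minhash_of("a " * 20)
doc_b = minhash_of("a " * 30)
print(doc_a.jaccard(doc_b))  # ~1.0: identical token sets, so the two files cluster as duplicates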
import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging lowercase_ : Tuple = logging.get_logger(__name__) class _lowerCamelCase ( UpperCamelCase_ ): __a = ["input_values", "attention_mask"] def __init__( self , lowerCAmelCase = 1 , lowerCAmelCase = 16000 , lowerCAmelCase = 0.0 , lowerCAmelCase = False , lowerCAmelCase = 80 , lowerCAmelCase = 16 , lowerCAmelCase = 64 , lowerCAmelCase = "hann_window" , lowerCAmelCase = 1.0 , lowerCAmelCase = 80 , lowerCAmelCase = 7600 , lowerCAmelCase = 1e-10 , lowerCAmelCase = 2 , lowerCAmelCase = True , **lowerCAmelCase , ) -> str: super().__init__(feature_size=lowerCAmelCase , sampling_rate=lowerCAmelCase , padding_value=lowerCAmelCase , **lowerCAmelCase ) SCREAMING_SNAKE_CASE__: Tuple= do_normalize SCREAMING_SNAKE_CASE__: Optional[Any]= return_attention_mask SCREAMING_SNAKE_CASE__: Optional[int]= num_mel_bins SCREAMING_SNAKE_CASE__: Union[str, Any]= hop_length SCREAMING_SNAKE_CASE__: Optional[int]= win_length SCREAMING_SNAKE_CASE__: Dict= win_function SCREAMING_SNAKE_CASE__: str= frame_signal_scale SCREAMING_SNAKE_CASE__: Optional[int]= fmin SCREAMING_SNAKE_CASE__: Any= fmax SCREAMING_SNAKE_CASE__: Union[str, Any]= mel_floor SCREAMING_SNAKE_CASE__: Tuple= reduction_factor SCREAMING_SNAKE_CASE__: Dict= win_length * sampling_rate // 1000 SCREAMING_SNAKE_CASE__: int= hop_length * sampling_rate // 1000 SCREAMING_SNAKE_CASE__: List[str]= optimal_fft_length(self.sample_size ) SCREAMING_SNAKE_CASE__: List[Any]= (self.n_fft // 2) + 1 SCREAMING_SNAKE_CASE__: List[str]= window_function(window_length=self.sample_size , name=self.win_function , periodic=lowerCAmelCase ) SCREAMING_SNAKE_CASE__: List[Any]= mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , ) if frame_signal_scale != 1.0: warnings.warn( '''The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers''' , lowerCAmelCase , ) if reduction_factor != 2.0: warnings.warn( '''The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers''' , lowerCAmelCase , ) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def UpperCamelCase_ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 0.0 ) -> List[np.ndarray]: if attention_mask is not None: SCREAMING_SNAKE_CASE__: Any= np.array(lowerCAmelCase , np.intaa ) SCREAMING_SNAKE_CASE__: Optional[Any]= [] for vector, length in zip(lowerCAmelCase , attention_mask.sum(-1 ) ): SCREAMING_SNAKE_CASE__: str= (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 ) if length < normed_slice.shape[0]: SCREAMING_SNAKE_CASE__: Optional[int]= padding_value normed_input_values.append(lowerCAmelCase ) else: SCREAMING_SNAKE_CASE__: List[str]= [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values] return normed_input_values def UpperCamelCase_ ( self , lowerCAmelCase , ) -> np.ndarray: SCREAMING_SNAKE_CASE__: Tuple= spectrogram( lowerCAmelCase , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , 
fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='''log10''' , ) return log_mel_spec.T def __call__( self , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ) -> BatchFeature: if audio is None and audio_target is None: raise ValueError('''You must provide either `audio` or `audio_target` values.''' ) if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' f' {self.sampling_rate}. Please make sure that the provided audio input was sampled with' f' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the ``sampling_rate`` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) if audio is not None: SCREAMING_SNAKE_CASE__: str= self._process_audio( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase , ) else: SCREAMING_SNAKE_CASE__: int= None if audio_target is not None: SCREAMING_SNAKE_CASE__: Union[str, Any]= self._process_audio( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , **lowerCAmelCase , ) if inputs is None: return inputs_target else: SCREAMING_SNAKE_CASE__: Tuple= inputs_target['''input_values'''] SCREAMING_SNAKE_CASE__: List[str]= inputs_target.get('''attention_mask''' ) if decoder_attention_mask is not None: SCREAMING_SNAKE_CASE__: Dict= decoder_attention_mask return inputs def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase = False , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = False , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ) -> BatchFeature: SCREAMING_SNAKE_CASE__: Tuple= isinstance(lowerCAmelCase , np.ndarray ) and len(speech.shape ) > 1 if is_batched_numpy and len(speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) SCREAMING_SNAKE_CASE__: int= is_batched_numpy or ( isinstance(lowerCAmelCase , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: SCREAMING_SNAKE_CASE__: Optional[int]= [np.asarray(lowerCAmelCase , dtype=np.floataa ) for speech in speech] elif not is_batched and not isinstance(lowerCAmelCase , np.ndarray ): SCREAMING_SNAKE_CASE__: Dict= np.asarray(lowerCAmelCase , dtype=np.floataa ) elif isinstance(lowerCAmelCase , np.ndarray ) and speech.dtype is np.dtype(np.floataa ): SCREAMING_SNAKE_CASE__: int= speech.astype(np.floataa ) # always return batch if not is_batched: SCREAMING_SNAKE_CASE__: List[str]= [speech] # needed to make pad() work on spectrogram inputs SCREAMING_SNAKE_CASE__: List[str]= self.feature_size # convert into correct format for padding if is_target: SCREAMING_SNAKE_CASE__: List[str]= [self._extract_mel_features(lowerCAmelCase ) for waveform in speech] SCREAMING_SNAKE_CASE__: int= BatchFeature({'''input_values''': features} ) SCREAMING_SNAKE_CASE__: str= self.num_mel_bins else: SCREAMING_SNAKE_CASE__: Union[str, Any]= BatchFeature({'''input_values''': speech} ) SCREAMING_SNAKE_CASE__: Union[str, Any]= self.pad( lowerCAmelCase , 
padding=lowerCAmelCase , max_length=lowerCAmelCase , truncation=lowerCAmelCase , pad_to_multiple_of=lowerCAmelCase , return_attention_mask=lowerCAmelCase , **lowerCAmelCase , ) SCREAMING_SNAKE_CASE__: str= feature_size_hack # convert input values to correct format SCREAMING_SNAKE_CASE__: Union[str, Any]= padded_inputs['''input_values'''] if not isinstance(input_values[0] , np.ndarray ): SCREAMING_SNAKE_CASE__: Dict= [np.asarray(lowerCAmelCase , dtype=np.floataa ) for array in input_values] elif ( not isinstance(lowerCAmelCase , np.ndarray ) and isinstance(input_values[0] , np.ndarray ) and input_values[0].dtype is np.dtype(np.floataa ) ): SCREAMING_SNAKE_CASE__: List[str]= [array.astype(np.floataa ) for array in input_values] elif isinstance(lowerCAmelCase , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ): SCREAMING_SNAKE_CASE__: Dict= input_values.astype(np.floataa ) # convert attention_mask to correct format SCREAMING_SNAKE_CASE__: Union[str, Any]= padded_inputs.get('''attention_mask''' ) if attention_mask is not None: SCREAMING_SNAKE_CASE__: Optional[Any]= [np.asarray(lowerCAmelCase , dtype=np.intaa ) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and self.do_normalize: SCREAMING_SNAKE_CASE__: List[str]= ( attention_mask if self._get_padding_strategies(lowerCAmelCase , max_length=lowerCAmelCase ) is not PaddingStrategy.DO_NOT_PAD else None ) SCREAMING_SNAKE_CASE__: Any= self.zero_mean_unit_var_norm( padded_inputs['''input_values'''] , attention_mask=lowerCAmelCase , padding_value=self.padding_value ) if return_tensors is not None: SCREAMING_SNAKE_CASE__: List[str]= padded_inputs.convert_to_tensors(lowerCAmelCase ) return padded_inputs def UpperCamelCase_ ( self ) -> Dict[str, Any]: SCREAMING_SNAKE_CASE__: Tuple= super().to_dict() # Don't serialize these as they are derived from the other properties. SCREAMING_SNAKE_CASE__: List[str]= ['''window''', '''mel_filters''', '''sample_size''', '''sample_stride''', '''n_fft''', '''n_freqs'''] for name in names: if name in output: del output[name] return output
64
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
696
0
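Since Γ(n) = (n − 1)! on positive integers, the quadrature-based `gamma` above can be sanity-checked in a few lines. A minimal sketch, assuming SciPy is installed and the `gamma`/`integrand` pair from this row is in scope:

import math

# gamma(n) should reproduce (n - 1)! up to quadrature error.
for n in range(1, 7):
    assert abs(gamma(float(n)) - math.factorial(n - 1)) < 1e-6
print(gamma(5.0))  # ~24.0 == 4!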
"""simple docstring""" from __future__ import annotations from collections import deque from collections.abc import Sequence from dataclasses import dataclass from typing import Any @dataclass class __lowercase : snake_case_ = 42 snake_case_ = None snake_case_ = None def lowerCAmelCase ( ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = Node(1 ) UpperCAmelCase__ : int = Node(2 ) UpperCAmelCase__ : str = Node(3 ) UpperCAmelCase__ : List[str] = Node(4 ) UpperCAmelCase__ : int = Node(5 ) return tree def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' return [root.data, *preorder(root.left ), *preorder(root.right )] if root else [] def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' return postorder(root.left ) + postorder(root.right ) + [root.data] if root else [] def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' return [*inorder(root.left ), root.data, *inorder(root.right )] if root else [] def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0 def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : list[Any] = [] if root is None: return output UpperCAmelCase__ : int = deque([root] ) while process_queue: UpperCAmelCase__ : Union[str, Any] = process_queue.popleft() output.append(node.data ) if node.left: process_queue.append(node.left ) if node.right: process_queue.append(node.right ) return output def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : list[Any] = [] def populate_output(__UpperCamelCase , __UpperCamelCase ) -> None: if not root: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.left , level - 1 ) populate_output(root.right , level - 1 ) populate_output(__UpperCamelCase , __UpperCamelCase ) return output def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ): '''simple docstring''' UpperCAmelCase__ : list[Any] = [] def populate_output(__UpperCamelCase , __UpperCamelCase ) -> None: if root is None: return if level == 1: output.append(root.data ) elif level > 1: populate_output(root.right , level - 1 ) populate_output(root.left , level - 1 ) populate_output(__UpperCamelCase , __UpperCamelCase ) return output def lowerCAmelCase ( __UpperCamelCase ): '''simple docstring''' if root is None: return [] UpperCAmelCase__ : list[Sequence[Node | None]] = [] UpperCAmelCase__ : Dict = 0 UpperCAmelCase__ : Union[str, Any] = height(__UpperCamelCase ) for h in range(1 , height_tree + 1 ): if not flag: output.append(get_nodes_from_left_to_right(__UpperCamelCase , __UpperCamelCase ) ) UpperCAmelCase__ : Tuple = 1 else: output.append(get_nodes_from_right_to_left(__UpperCamelCase , __UpperCamelCase ) ) UpperCAmelCase__ : Union[str, Any] = 0 return output def lowerCAmelCase ( ): # Main function for testing. 
'''simple docstring''' UpperCAmelCase__ : Tuple = make_tree() print(F"In-order Traversal: {inorder(__UpperCamelCase )}" ) print(F"Pre-order Traversal: {preorder(__UpperCamelCase )}" ) print(F"Post-order Traversal: {postorder(__UpperCamelCase )}" , """\n""" ) print(F"Height of Tree: {height(__UpperCamelCase )}" , """\n""" ) print("""Complete Level Order Traversal: """ ) print(level_order(__UpperCamelCase ) , """\n""" ) print("""Level-wise order Traversal: """ ) for level in range(1 , height(__UpperCamelCase ) + 1 ): print(F"Level {level}:" , get_nodes_from_left_to_right(__UpperCamelCase , level=__UpperCamelCase ) ) print("""\nZigZag order Traversal: """ ) print(zigzag(__UpperCamelCase ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
65
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
696
0
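The EMI function above implements the amortization formula A = P · r · (1 + r)^n / ((1 + r)^n − 1) with a monthly rate r and n monthly payments. A quick worked example, with figures chosen purely for illustration:

# 25,000 borrowed at 8% per annum over 3 years:
# monthly rate r = 0.08 / 12, n = 36 monthly payments.
emi = equated_monthly_installments(25_000, 0.08, 3)
print(round(emi, 2))  # ~783.41 per month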
import argparse import hashlib # hashlib is only used inside the Test class import struct class lowerCAmelCase_ : def __init__( self , _lowerCAmelCase ): _lowercase : int = data _lowercase : Optional[int] = [0x67_452_301, 0xef_cda_b89, 0x98_bad_cfe, 0x10_325_476, 0xc3_d2e_1f0] @staticmethod def __a ( _lowerCAmelCase , _lowerCAmelCase ): return ((n << b) | (n >> (3_2 - b))) & 0xff_fff_fff def __a ( self ): _lowercase : Union[str, Any] = b'\x80' + b'\x00' * (6_3 - (len(self.data ) + 8) % 6_4) _lowercase : List[Any] = self.data + padding + struct.pack('>Q' , 8 * len(self.data ) ) return padded_data def __a ( self ): return [ self.padded_data[i : i + 6_4] for i in range(0 , len(self.padded_data ) , 6_4 ) ] def __a ( self , _lowerCAmelCase ): _lowercase : str = list(struct.unpack('>16L' , _lowerCAmelCase ) ) + [0] * 6_4 for i in range(1_6 , 8_0 ): _lowercase : List[Any] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 1_4] ^ w[i - 1_6]) , 1 ) return w def __a ( self ): _lowercase : Dict = self.padding() _lowercase : int = self.split_blocks() for block in self.blocks: _lowercase : List[str] = self.expand_block(_lowerCAmelCase ) _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : List[Any] = self.h for i in range(0 , 8_0 ): if 0 <= i < 2_0: _lowercase : Any = (b & c) | ((~b) & d) _lowercase : List[Any] = 0x5a_827_999 elif 2_0 <= i < 4_0: _lowercase : Optional[Any] = b ^ c ^ d _lowercase : Union[str, Any] = 0x6e_d9e_ba1 elif 4_0 <= i < 6_0: _lowercase : Optional[int] = (b & c) | (b & d) | (c & d) _lowercase : Tuple = 0x8f_1bb_cdc elif 6_0 <= i < 8_0: _lowercase : Any = b ^ c ^ d _lowercase : Optional[Any] = 0xca_62c_1d6 _lowercase , _lowercase , _lowercase , _lowercase , _lowercase : Union[str, Any] = ( self.rotate(_lowerCAmelCase , 5 ) + f + e + k + expanded_block[i] & 0xff_fff_fff, a, self.rotate(_lowerCAmelCase , 3_0 ), c, d, ) _lowercase : Optional[Any] = ( self.h[0] + a & 0xff_fff_fff, self.h[1] + b & 0xff_fff_fff, self.h[2] + c & 0xff_fff_fff, self.h[3] + d & 0xff_fff_fff, self.h[4] + e & 0xff_fff_fff, ) return ("{:08x}" * 5).format(*self.h ) def __magic_name__ ( ) -> List[Any]: _lowercase : Union[str, Any] = b'Test String' assert SHAaHash(SCREAMING_SNAKE_CASE ).final_hash() == hashlib.shaa(SCREAMING_SNAKE_CASE ).hexdigest() # noqa: S324 def __magic_name__ ( ) -> List[str]: _lowercase : List[Any] = argparse.ArgumentParser(description='Process some strings or files' ) parser.add_argument( '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , ) parser.add_argument('--file' , dest='input_file' , help='Hash contents of a file' ) _lowercase : Optional[Any] = parser.parse_args() _lowercase : Any = args.input_string # In any case hash input should be a bytestring if args.input_file: with open(args.input_file , 'rb' ) as f: _lowercase : Dict = f.read() else: _lowercase : List[Any] = bytes(SCREAMING_SNAKE_CASE , 'utf-8' ) print(SHAaHash(SCREAMING_SNAKE_CASE ).final_hash() ) if __name__ == "__main__": main() import doctest doctest.testmod()
66
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(n: int) -> int:
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
696
0
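The Möbius function above leans on two local `maths` helpers. A self-contained sketch of the same definition, with a trial-division factorization written for this note, reproduces the classic values μ(1)..μ(10):

def mobius_standalone(n: int) -> int:
    # Collect prime factors with multiplicity by trial division.
    factors = []
    d, m = 2, n
    while d * d <= m:
        while m % d == 0:
            factors.append(d)
            m //= d
        d += 1
    if m > 1:
        factors.append(m)
    if len(set(factors)) != len(factors):  # a repeated prime means n is not square-free
        return 0
    return -1 if len(factors) % 2 else 1

print([mobius_standalone(n) for n in range(1, 11)])  # [1, -1, -1, 0, -1, 1, -1, 0, 0, 1]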
from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging snake_case = logging.get_logger(__name__) class A_ ( UpperCAmelCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = ['''pixel_values'''] def __init__( self : Union[str, Any] ,__A : bool = True ,__A : Dict[str, int] = None ,__A : PILImageResampling = PILImageResampling.BICUBIC ,__A : bool = True ,__A : Dict[str, int] = None ,__A : bool = True ,__A : Union[int, float] = 1 / 255 ,__A : bool = True ,__A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN ,__A : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD ,**__A : Optional[int] ,) -> None: super().__init__(**__A ) _lowercase = size if size is not None else {'shortest_edge': 224} _lowercase = get_size_dict(__A ,default_to_square=__A ) _lowercase = crop_size if crop_size is not None else {'height': 224, 'width': 224} _lowercase = get_size_dict(__A ,param_name='crop_size' ) _lowercase = do_resize _lowercase = size _lowercase = resample _lowercase = do_center_crop _lowercase = crop_size _lowercase = do_rescale _lowercase = rescale_factor _lowercase = do_normalize _lowercase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN _lowercase = image_std if image_std is not None else IMAGENET_DEFAULT_STD def __UpperCAmelCase ( self : int ,__A : np.ndarray ,__A : Dict[str, int] ,__A : PILImageResampling = PILImageResampling.BICUBIC ,__A : Optional[Union[str, ChannelDimension]] = None ,**__A : List[str] ,) -> np.ndarray: _lowercase = get_size_dict(__A ,default_to_square=__A ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: _lowercase = int((256 / 224) * size['shortest_edge'] ) _lowercase = get_resize_output_image_size(__A ,size=__A ,default_to_square=__A ) _lowercase = {'height': output_size[0], 'width': output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( F"""Size dict must have keys 'height' and 'width' or 'shortest_edge'. Got {size_dict.keys()}""" ) return resize( __A ,size=(size_dict['height'], size_dict['width']) ,resample=__A ,data_format=__A ,**__A ) def __UpperCAmelCase ( self : int ,__A : np.ndarray ,__A : Dict[str, int] ,__A : Optional[Union[str, ChannelDimension]] = None ,**__A : Optional[int] ,) -> np.ndarray: _lowercase = get_size_dict(__A ) if "height" not in size or "width" not in size: raise ValueError(F"""Size dict must have keys 'height' and 'width'. 
Got {size.keys()}""" ) return center_crop(__A ,size=(size['height'], size['width']) ,data_format=__A ,**__A ) def __UpperCAmelCase ( self : Optional[int] ,__A : np.ndarray ,__A : Union[int, float] ,__A : Optional[Union[str, ChannelDimension]] = None ,**__A : List[str] ,) -> np.ndarray: return rescale(__A ,scale=__A ,data_format=__A ,**__A ) def __UpperCAmelCase ( self : str ,__A : np.ndarray ,__A : Union[float, List[float]] ,__A : Union[float, List[float]] ,__A : Optional[Union[str, ChannelDimension]] = None ,**__A : int ,) -> np.ndarray: return normalize(__A ,mean=__A ,std=__A ,data_format=__A ,**__A ) def __UpperCAmelCase ( self : Dict ,__A : ImageInput ,__A : Optional[bool] = None ,__A : Optional[Dict[str, int]] = None ,__A : PILImageResampling = None ,__A : Optional[bool] = None ,__A : Optional[Dict[str, int]] = None ,__A : Optional[bool] = None ,__A : Optional[float] = None ,__A : Optional[bool] = None ,__A : Optional[Union[float, Iterable[float]]] = None ,__A : Optional[Union[float, Iterable[float]]] = None ,__A : Optional[TensorType] = None ,__A : ChannelDimension = ChannelDimension.FIRST ,**__A : Optional[Any] ,) -> BatchFeature: _lowercase = do_resize if do_resize is not None else self.do_resize _lowercase = resample if resample is not None else self.resample _lowercase = do_center_crop if do_center_crop is not None else self.do_center_crop _lowercase = do_rescale if do_rescale is not None else self.do_rescale _lowercase = rescale_factor if rescale_factor is not None else self.rescale_factor _lowercase = do_normalize if do_normalize is not None else self.do_normalize _lowercase = image_mean if image_mean is not None else self.image_mean _lowercase = image_std if image_std is not None else self.image_std _lowercase = size if size is not None else self.size _lowercase = get_size_dict(__A ,default_to_square=__A ) _lowercase = crop_size if crop_size is not None else self.crop_size _lowercase = get_size_dict(__A ,param_name='crop_size' ) _lowercase = make_list_of_images(__A ) if not valid_images(__A ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.' ) if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # All transformations expect numpy arrays. _lowercase = [to_numpy_array(__A ) for image in images] if do_resize: _lowercase = [self.resize(__A ,__A ,__A ) for image in images] if do_center_crop: _lowercase = [self.center_crop(__A ,__A ) for image in images] if do_rescale: _lowercase = [self.rescale(__A ,__A ) for image in images] if do_normalize: _lowercase = [self.normalize(__A ,__A ,__A ) for image in images] _lowercase = [to_channel_dimension_format(__A ,__A ) for image in images] _lowercase = {'pixel_values': images} return BatchFeature(data=__A ,tensor_type=__A )
67
import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPTaConfig from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 __lowerCAmelCase : int ={ 'return_dict': False, 'output_hidden_states': True, 'output_attentions': True, 'torchscript': True, 'torch_dtype': 'float16', 'use_bfloat16': True, 'tf_legacy_loss': True, 'pruned_heads': {'a': 1}, 'tie_word_embeddings': False, 'is_decoder': True, 'cross_attention_hidden_size': 1_2_8, 'add_cross_attention': True, 'tie_encoder_decoder': True, 'max_length': 5_0, 'min_length': 3, 'do_sample': True, 'early_stopping': True, 'num_beams': 3, 'num_beam_groups': 3, 'diversity_penalty': 0.5, 'temperature': 2.0, 'top_k': 1_0, 'top_p': 0.7, 'typical_p': 0.2, 'repetition_penalty': 0.8, 'length_penalty': 0.8, 'no_repeat_ngram_size': 5, 'encoder_no_repeat_ngram_size': 5, 'bad_words_ids': [1, 2, 3], 'num_return_sequences': 3, 'chunk_size_feed_forward': 5, 'output_scores': True, 'return_dict_in_generate': True, 'forced_bos_token_id': 2, 'forced_eos_token_id': 3, 'remove_invalid_values': True, 'architectures': ['BertModel'], 'finetuning_task': 'translation', 'id2label': {0: 'label'}, 'label2id': {'label': '0'}, 'tokenizer_class': 'BertTokenizerFast', 'prefix': 'prefix', 'bos_token_id': 6, 'pad_token_id': 7, 'eos_token_id': 8, 'sep_token_id': 9, 'decoder_start_token_id': 1_0, 'exponential_decay_length_penalty': (5, 1.0_1), 'suppress_tokens': [0, 1], 'begin_suppress_tokens': 2, 'task_specific_params': {'translation': 'some_params'}, 'problem_type': 'regression', } @is_staging_test class _lowercase ( unittest.TestCase ): '''simple docstring''' @classmethod def __magic_name__( cls :Optional[Any] ) -> Any: __SCREAMING_SNAKE_CASE : str = TOKEN HfFolder.save_token(lowerCAmelCase__ ) @classmethod def __magic_name__( cls :List[str] ) -> List[str]: try: delete_repo(token=cls._token , repo_id='''test-config''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-config''' ) except HTTPError: pass def __magic_name__( self :Any ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE : int = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) config.push_to_hub('''test-config''' , use_auth_token=self._token ) __SCREAMING_SNAKE_CASE : Tuple = BertConfig.from_pretrained(f'''{USER}/test-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-config''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCAmelCase__ , repo_id='''test-config''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token ) __SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) def 
__magic_name__( self :int ) -> Optional[Any]: __SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token ) __SCREAMING_SNAKE_CASE : Any = BertConfig.from_pretrained('''valid_org/test-config-org''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-config-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowerCAmelCase__ , repo_id='''valid_org/test-config-org''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token ) __SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''valid_org/test-config-org''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) def __magic_name__( self :Dict ) -> Optional[int]: CustomConfig.register_for_auto_class() __SCREAMING_SNAKE_CASE : Tuple = CustomConfig(attribute=42 ) config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''} ) __SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''' , trust_remote_code=lowerCAmelCase__ ) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''' ) self.assertEqual(new_config.attribute , 42 ) class _lowercase ( unittest.TestCase ): '''simple docstring''' def __magic_name__( self :List[str] ) -> Dict: __SCREAMING_SNAKE_CASE : Optional[Any] = GPTaConfig() # attempt to modify each of int/float/bool/str config records and verify they were updated __SCREAMING_SNAKE_CASE : Optional[Any] = c.n_embd + 1 # int __SCREAMING_SNAKE_CASE : Optional[Any] = c.resid_pdrop + 1.0 # float __SCREAMING_SNAKE_CASE : Dict = not c.scale_attn_weights # bool __SCREAMING_SNAKE_CASE : Optional[int] = c.summary_type + '''foo''' # str c.update_from_string( f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' ) self.assertEqual(lowerCAmelCase__ , c.n_embd , '''mismatch for key: n_embd''' ) self.assertEqual(lowerCAmelCase__ , c.resid_pdrop , '''mismatch for key: resid_pdrop''' ) self.assertEqual(lowerCAmelCase__ , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''' ) self.assertEqual(lowerCAmelCase__ , c.summary_type , '''mismatch for key: summary_type''' ) def __magic_name__( self :Dict ) -> str: __SCREAMING_SNAKE_CASE : Dict = PretrainedConfig() __SCREAMING_SNAKE_CASE : str = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. 
self.assertListEqual( lowerCAmelCase__ , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] ) __SCREAMING_SNAKE_CASE : List[Any] = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCAmelCase__ , lowerCAmelCase__ )] if len(lowerCAmelCase__ ) > 0: raise ValueError( '''The following keys are set with the default values in''' ''' `test_configuration_common.config_common_kwargs` pick another value for them:''' f''' {', '.join(lowerCAmelCase__ )}.''' ) def __magic_name__( self :Union[str, Any] ) -> List[Any]: with self.assertRaises(lowerCAmelCase__ ): # config is in subfolder, the following should not work without specifying the subfolder __SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' ) __SCREAMING_SNAKE_CASE : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' ) self.assertIsNotNone(lowerCAmelCase__ ) def __magic_name__( self :List[Any] ) -> Optional[Any]: # A mock response for an HTTP head request to emulate server down __SCREAMING_SNAKE_CASE : Union[str, Any] = mock.Mock() __SCREAMING_SNAKE_CASE : List[Any] = 500 __SCREAMING_SNAKE_CASE : Union[str, Any] = {} __SCREAMING_SNAKE_CASE : Optional[Any] = HTTPError __SCREAMING_SNAKE_CASE : str = {} # Download this model to make sure it's in the cache. __SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=lowerCAmelCase__ ) as mock_head: __SCREAMING_SNAKE_CASE : Optional[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) # This check we did call the fake head request mock_head.assert_called() def __magic_name__( self :Union[str, Any] ) -> List[Any]: # This test is for deprecated behavior and can be removed in v5 __SCREAMING_SNAKE_CASE : Optional[int] = BertConfig.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' ) def __magic_name__( self :str ) -> List[str]: __SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained('''bert-base-cased''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = ['''config.4.0.0.json'''] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[Any] = 2 json.dump(configuration.to_dict() , open(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , '''w''' ) ) # This should pick the new configuration file as the version of Transformers is > 4.0.0 __SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertEqual(new_configuration.hidden_size , 2 ) # Will need to be adjusted if we reach v42 and this test is still here. 
# Should pick the old configuration file as the version of Transformers is < 4.42.0 __SCREAMING_SNAKE_CASE : List[Any] = ['''config.42.0.0.json'''] __SCREAMING_SNAKE_CASE : Tuple = 768 configuration.save_pretrained(lowerCAmelCase__ ) shutil.move(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , os.path.join(lowerCAmelCase__ , '''config.42.0.0.json''' ) ) __SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertEqual(new_configuration.hidden_size , 768 ) def __magic_name__( self :List[str] ) -> Union[str, Any]: # This repo has two configuration files, one for v4.0.0 and above with a different hidden size. __SCREAMING_SNAKE_CASE : Union[str, Any] = '''hf-internal-testing/test-two-configs''' import transformers as new_transformers __SCREAMING_SNAKE_CASE : int = '''v4.0.0''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = new_transformers.models.auto.AutoConfig.from_pretrained( lowerCAmelCase__ , return_unused_kwargs=lowerCAmelCase__ ) self.assertEqual(new_configuration.hidden_size , 2 ) # This checks `_configuration_file` ia not kept in the kwargs by mistake. self.assertDictEqual(lowerCAmelCase__ , {} ) # Testing an older version by monkey-patching the version in the module it's used. import transformers as old_transformers __SCREAMING_SNAKE_CASE : List[str] = '''v3.0.0''' __SCREAMING_SNAKE_CASE : Any = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertEqual(old_configuration.hidden_size , 768 )
696
0
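The image processor in this row matches LeViT-style preprocessing (resize the shortest edge to 256/224 of the target, center crop, rescale, normalize). A usage sketch through the public API; the `LevitImageProcessor` class name, its defaults, and the printed shape are assumptions based on the released `transformers` package:

import numpy as np
from transformers import LevitImageProcessor

processor = LevitImageProcessor()  # assumed defaults: shortest_edge 224, 224x224 center crop
image = (np.random.rand(480, 640, 3) * 255).astype(np.uint8)  # fake HWC RGB image
batch = processor(image, return_tensors="np")
print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)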
import os
import unittest

from huggingface_hub.utils import are_progress_bars_disabled

import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar


class LoggingTester(unittest.TestCase):
    def test_set_level(self):
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()

        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")


def test_set_progress_bar_enabled():
    disable_progress_bar()
    assert are_progress_bars_disabled()

    enable_progress_bar()
    assert not are_progress_bars_disabled()
68
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]


if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
696
0
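The tests in this row drive the library-wide verbosity switches. As a standalone illustration, assuming only that the `transformers` package is installed, the same public API can be used directly:

from transformers.utils import logging

logging.set_verbosity_info()
logger = logging.get_logger("transformers")
logger.info("visible at INFO level")

logging.set_verbosity_error()
logger.warning("suppressed: the shared level is now ERROR")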
'''simple docstring''' import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def __UpperCAmelCase ( _UpperCAmelCase : List[str] ) -> List[str]: __snake_case = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2] __snake_case = True if "large" in model_name or "huge" in model_name else False __snake_case = True if "large" in model_name or "huge" in model_name else False __snake_case = True if "large" in model_name or "huge" in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: __snake_case = [3, 3, 3, 3] __snake_case = [5, 5, 5, 5] elif "fl4" in model_name: __snake_case = [4, 4, 4, 4] __snake_case = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: __snake_case = [3, 3, 3, 3] if "lrf" in model_name: __snake_case = [3, 3, 3, 3] else: __snake_case = [2, 2, 2, 2] if "tiny" in model_name: __snake_case = 96 elif "small" in model_name: __snake_case = 96 elif "base" in model_name: __snake_case = 1_28 elif "large" in model_name: __snake_case = 1_92 elif "xlarge" in model_name: __snake_case = 2_56 elif "huge" in model_name: __snake_case = 3_52 # set label information __snake_case = "huggingface/label-files" if "large" in model_name or "huge" in model_name: __snake_case = "imagenet-22k-id2label.json" else: __snake_case = "imagenet-1k-id2label.json" __snake_case = json.load(open(hf_hub_download(_UpperCAmelCase , _UpperCAmelCase , repo_type="dataset" ) , "r" ) ) __snake_case = {int(_UpperCAmelCase ): v for k, v in idalabel.items()} __snake_case = {v: k for k, v in idalabel.items()} __snake_case = FocalNetConfig( embed_dim=_UpperCAmelCase , depths=_UpperCAmelCase , focal_levels=_UpperCAmelCase , focal_windows=_UpperCAmelCase , use_conv_embed=_UpperCAmelCase , idalabel=_UpperCAmelCase , labelaid=_UpperCAmelCase , use_post_layernorm=_UpperCAmelCase , use_layerscale=_UpperCAmelCase , ) return config def __UpperCAmelCase ( _UpperCAmelCase : Tuple ) -> Optional[Any]: if "patch_embed.proj" in name: __snake_case = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: __snake_case = name.replace("patch_embed.norm" , "embeddings.norm" ) if "layers" in name: __snake_case = "encoder." + name if "encoder.layers" in name: __snake_case = name.replace("encoder.layers" , "encoder.stages" ) if "downsample.proj" in name: __snake_case = name.replace("downsample.proj" , "downsample.projection" ) if "blocks" in name: __snake_case = name.replace("blocks" , "layers" ) if "modulation.f.weight" in name or "modulation.f.bias" in name: __snake_case = name.replace("modulation.f" , "modulation.projection_in" ) if "modulation.h.weight" in name or "modulation.h.bias" in name: __snake_case = name.replace("modulation.h" , "modulation.projection_context" ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: __snake_case = name.replace("modulation.proj" , "modulation.projection_out" ) if name == "norm.weight": __snake_case = "layernorm.weight" if name == "norm.bias": __snake_case = "layernorm.bias" if "head" in name: __snake_case = name.replace("head" , "classifier" ) else: __snake_case = "focalnet." 
+ name return name def __UpperCAmelCase ( _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str]=False ) -> Dict: # fmt: off __snake_case = { "focalnet-tiny": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth", "focalnet-tiny-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth", "focalnet-small": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth", "focalnet-small-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth", "focalnet-base": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth", "focalnet-base-lrf": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth", "focalnet-large-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth", "focalnet-large-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth", "focalnet-xlarge-lrf-fl3": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth", "focalnet-xlarge-lrf-fl4": "https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth", } # fmt: on __snake_case = model_name_to_url[model_name] print("Checkpoint URL: " , _UpperCAmelCase ) __snake_case = torch.hub.load_state_dict_from_url(_UpperCAmelCase , map_location="cpu" )["model"] # rename keys for key in state_dict.copy().keys(): __snake_case = state_dict.pop(_UpperCAmelCase ) __snake_case = val __snake_case = get_focalnet_config(_UpperCAmelCase ) __snake_case = FocalNetForImageClassification(_UpperCAmelCase ) model.eval() # load state dict model.load_state_dict(_UpperCAmelCase ) # verify conversion __snake_case = "http://images.cocodataset.org/val2017/000000039769.jpg" __snake_case = BitImageProcessor( do_resize=_UpperCAmelCase , size={"shortest_edge": 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=_UpperCAmelCase , crop_size=2_24 , do_normalize=_UpperCAmelCase , image_mean=_UpperCAmelCase , image_std=_UpperCAmelCase , ) __snake_case = Image.open(requests.get(_UpperCAmelCase , stream=_UpperCAmelCase ).raw ) __snake_case = processor(images=_UpperCAmelCase , return_tensors="pt" ) __snake_case = transforms.Compose( [ transforms.Resize(2_56 ), transforms.CenterCrop(2_24 ), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ), ] ) __snake_case = image_transforms(_UpperCAmelCase ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , _UpperCAmelCase , atol=1E-4 ) __snake_case = model(**_UpperCAmelCase ) __snake_case = outputs.logits.argmax(-1 ).item() print("Predicted class:" , model.config.idalabel[predicted_class_idx] ) print("First values of logits:" , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": __snake_case = torch.tensor([0.2166, -0.4368, 0.2191] ) elif model_name == "focalnet-tiny-lrf": __snake_case = torch.tensor([1.1669, 0.0125, -0.1695] ) elif model_name == "focalnet-small": __snake_case = torch.tensor([0.4917, -0.0430, 0.1341] ) elif model_name == "focalnet-small-lrf": __snake_case = torch.tensor([-0.2588, -0.5342, -0.2331] ) elif model_name == "focalnet-base": __snake_case = torch.tensor([-0.1655, -0.4090, -0.1730] ) elif model_name == "focalnet-base-lrf": 
__snake_case = torch.tensor([0.5306, -0.0483, -0.3928] ) assert torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1E-4 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_UpperCAmelCase ) processor.save_pretrained(_UpperCAmelCase ) if push_to_hub: print(F'''Pushing model and processor of {model_name} to the hub...''' ) model.push_to_hub(F'''{model_name}''' ) processor.push_to_hub(F'''{model_name}''' ) if __name__ == "__main__": a : int = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''focalnet-tiny''', type=str, help='''Name of the FocalNet model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub.''', ) a : List[Any] = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
69
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layers we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layers we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
696
0
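The FocalNet conversion script in this row renames every checkpoint key before loading the weights into the Hugging Face model. A generic sketch of that pop-and-rename pattern; the mapping shown is illustrative only, the real FocalNet mapping lives in the script's own rename function:

import torch

def rename_state_dict(state_dict, rename_fn):
    # Pop each key and re-insert it under the mapped name, as the script above does.
    renamed = {}
    for key in list(state_dict.keys()):
        renamed[rename_fn(key)] = state_dict.pop(key)
    return renamed

sd = {"patch_embed.proj.weight": torch.zeros(3, 3)}
new_sd = rename_state_dict(sd, lambda k: k.replace("patch_embed.proj", "embeddings.patch_embeddings.projection"))
print(list(new_sd))  # ['embeddings.patch_embeddings.projection.weight']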
from math import ceil, sqrt


def solution(limit: int = 1_000_000) -> int:
    answer = 0
    for outer_width in range(3, (limit // 4) + 2):
        if outer_width**2 > limit:
            hole_width_lower_bound = max(ceil(sqrt(outer_width**2 - limit)), 1)
        else:
            hole_width_lower_bound = 1
        if (outer_width - hole_width_lower_bound) % 2:
            hole_width_lower_bound += 1

        answer += (outer_width - hole_width_lower_bound - 2) // 2 + 1

    return answer


if __name__ == "__main__":
    print(f"{solution() = }")
70
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
696
0
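The `solution` above counts hollow square laminae analytically: a lamina with outer width w and hole width h of the same parity uses w² − h² tiles, and for each w it counts the admissible h in one arithmetic step. A brute-force cross-check on a small tile budget, written for this note, confirms the counting:

def brute_force(limit: int) -> int:
    # Enumerate every (outer width, hole width) pair whose tile count fits the budget.
    count = 0
    w = 3
    while w * w - (w - 2) * (w - 2) <= limit:  # thinnest possible lamina still fits
        for h in range(w - 2, 0, -2):  # hole width shares the parity of w
            if w * w - h * h > limit:
                break
            count += 1
        w += 1
    return count

print(brute_force(100))  # 41, matching solution(100)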
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

T5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "t5-small": "https://huggingface.co/t5-small/resolve/main/config.json",
    "t5-base": "https://huggingface.co/t5-base/resolve/main/config.json",
    "t5-large": "https://huggingface.co/t5-large/resolve/main/config.json",
    "t5-3b": "https://huggingface.co/t5-3b/resolve/main/config.json",
    "t5-11b": "https://huggingface.co/t5-11b/resolve/main/config.json",
}


class T5Config(PretrainedConfig):
    model_type = "t5"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=512,
        d_kv=64,
        d_ff=2048,
        num_layers=6,
        num_decoder_layers=None,
        num_heads=8,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )


class T5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
code_codestyle: 71
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    """Resolve an Instagram video/IGTV page URL to its direct source and return the raw bytes."""
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
style_context_codestyle: 696
label: 0
# Logistic Regression from scratch
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets


def sigmoid_function(z):
    return 1 / (1 + np.exp(-z))


def cost_function(h, y):
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()


def log_likelihood(x, y, weights):
    scores = np.dot(x, weights)
    return np.sum(y * scores - np.log(1 + np.exp(scores)))


def logistic_reg(alpha, x, y, max_iterations=70000):
    theta = np.zeros(x.shape[1])
    for iterations in range(max_iterations):
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        gradient = np.dot(x.T, h - y) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x, theta)
        h = sigmoid_function(z)
        j = cost_function(h, y)
        if iterations % 100 == 0:
            print(f"loss: {j} \t")  # printing the loss after every 100 iterations
    return theta


if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=70000)
    print("theta: ", theta)  # printing the theta, i.e. our weight vector

    def predict_prob(x):
        # predicted probability from the fitted logistic regression model
        return sigmoid_function(np.dot(x, theta))

    plt.figure(figsize=(10, 6))
    plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="b", label="0")
    plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="r", label="1")
    (x1_min, x1_max) = (x[:, 0].min(), x[:, 0].max())
    (x2_min, x2_max) = (x[:, 1].min(), x[:, 1].max())
    (xx1, xx2) = np.meshgrid(np.linspace(x1_min, x1_max), np.linspace(x2_min, x2_max))
    grid = np.c_[xx1.ravel(), xx2.ravel()]
    probs = predict_prob(grid).reshape(xx1.shape)
    plt.contour(xx1, xx2, probs, [0.5], linewidths=1, colors="black")
    plt.legend()
    plt.show()
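A minimal usage sketch of the helpers above on a toy dataset (hypothetical standalone run; assumes the file is saved as logistic_regression.py):

import numpy as np

from logistic_regression import logistic_reg, sigmoid_function

x = np.array([[0.5, 1.0], [1.5, 2.0], [3.0, 3.5], [3.5, 4.0]])
y = np.array([0, 0, 1, 1])
theta = logistic_reg(0.1, x, y, max_iterations=1000)
print(sigmoid_function(x @ theta))  # predicted class-1 probabilities for the four training points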
code_codestyle: 72
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _lowercase ( A__ , A__ , A__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = StableDiffusionInstructPixaPixPipeline SCREAMING_SNAKE_CASE__ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''} SCREAMING_SNAKE_CASE__ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS SCREAMING_SNAKE_CASE__ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS SCREAMING_SNAKE_CASE__ : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS def __magic_name__( self :int ) -> Optional[int]: torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) __SCREAMING_SNAKE_CASE : str = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : Any = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : Dict = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) __SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __magic_name__( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any]=0 ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0] __SCREAMING_SNAKE_CASE : List[Any] = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert('''RGB''' ) if str(lowerCAmelCase__ ).startswith('''mps''' ): __SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(lowerCAmelCase__ ) else: __SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, 
'''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''image_guidance_scale''': 1, '''output_type''': '''numpy''', } return inputs def __magic_name__( self :Union[str, Any] ) -> str: __SCREAMING_SNAKE_CASE : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE : Any = self.get_dummy_components() __SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = sd_pipe(**lowerCAmelCase__ ).images __SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE : int = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __magic_name__( self :Tuple ) -> Optional[int]: __SCREAMING_SNAKE_CASE : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components() __SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = '''french fries''' __SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(**lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = output.images __SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __magic_name__( self :Dict ) -> Dict: __SCREAMING_SNAKE_CASE : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_components() __SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(lowerCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Dict = [inputs['''prompt''']] * 2 __SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(inputs['''image'''] ).astype(np.floataa ) / 255.0 __SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowerCAmelCase__ ).unsqueeze(0 ).to(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Union[str, Any] = image / 2 + 0.5 __SCREAMING_SNAKE_CASE : Optional[Any] = image.permute(0 , 3 , 1 , 2 ) __SCREAMING_SNAKE_CASE : Any = image.repeat(2 , 1 , 1 , 1 ) __SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(**lowerCAmelCase__ ).images __SCREAMING_SNAKE_CASE : Dict = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) __SCREAMING_SNAKE_CASE : Tuple = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __magic_name__( self :Union[str, Any] ) -> Dict: __SCREAMING_SNAKE_CASE : Tuple = '''cpu''' # ensure determinism for the device-dependent 
torch.Generator __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components() __SCREAMING_SNAKE_CASE : Union[str, Any] = EulerAncestralDiscreteScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' ) __SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(lowerCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Dict = sd_pipe(**lowerCAmelCase__ ).images __SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE : List[str] = [round(lowerCAmelCase__ , 4 ) for x in image_slice.flatten().tolist()] print(''','''.join([str(lowerCAmelCase__ ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE : List[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __magic_name__( self :Tuple ) -> Optional[int]: super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def __magic_name__( self :str ) -> List[Any]: __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components() __SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : int = VaeImageProcessor(do_resize=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : str = pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Tuple = pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' ) )[0] __SCREAMING_SNAKE_CASE : Union[str, Any] = components['''vae'''] __SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' ) for image_param in self.image_latents_params: if image_param in inputs.keys(): __SCREAMING_SNAKE_CASE : Optional[int] = vae.encode(inputs[image_param] ).latent_dist.mode() __SCREAMING_SNAKE_CASE : Dict = pipe(**lowerCAmelCase__ )[0] __SCREAMING_SNAKE_CASE : List[Any] = np.abs(out - out_latents_inputs ).max() self.assertLess(lowerCAmelCase__ , 1E-4 , '''passing latents as image input generate different result from passing image''' ) @slow @require_torch_gpu class _lowercase ( unittest.TestCase ): '''simple docstring''' def __magic_name__( self :Union[str, Any] ) -> str: super().tearDown() gc.collect() torch.cuda.empty_cache() def __magic_name__( self :int , lowerCAmelCase__ :Dict=0 ) -> Optional[int]: __SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : int = load_image( '''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' ) __SCREAMING_SNAKE_CASE : Dict = { '''prompt''': '''turn him into a cyborg''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''image_guidance_scale''': 1.0, '''output_type''': '''numpy''', } return inputs def __magic_name__( self :Dict ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing() __SCREAMING_SNAKE_CASE : Dict = self.get_inputs() __SCREAMING_SNAKE_CASE : str = pipe(**lowerCAmelCase__ 
).images __SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def __magic_name__( self :Any ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing() __SCREAMING_SNAKE_CASE : Any = self.get_inputs() __SCREAMING_SNAKE_CASE : int = pipe(**lowerCAmelCase__ ).images __SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE : Dict = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def __magic_name__( self :Optional[int] ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Any = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing() __SCREAMING_SNAKE_CASE : str = self.get_inputs() __SCREAMING_SNAKE_CASE : Optional[int] = pipe(**lowerCAmelCase__ ).images __SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE : List[Any] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def __magic_name__( self :Dict ) -> Tuple: __SCREAMING_SNAKE_CASE : List[Any] = 0 def callback_fn(lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :torch.FloatTensor ) -> None: __SCREAMING_SNAKE_CASE : Dict = True nonlocal number_of_steps number_of_steps += 1 if step == 1: __SCREAMING_SNAKE_CASE : Any = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) __SCREAMING_SNAKE_CASE : Tuple = latents[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE : str = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 elif step == 2: __SCREAMING_SNAKE_CASE : Union[str, Any] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) __SCREAMING_SNAKE_CASE : List[str] = latents[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE : str = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 __SCREAMING_SNAKE_CASE : List[str] = False __SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa ) __SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing() __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs() pipe(**lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def 
__magic_name__( self :List[str] ) -> Union[str, Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __SCREAMING_SNAKE_CASE : int = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa ) __SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() __SCREAMING_SNAKE_CASE : Dict = self.get_inputs() __SCREAMING_SNAKE_CASE : List[Any] = pipe(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Tuple = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def __magic_name__( self :int ) -> Tuple: __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 __SCREAMING_SNAKE_CASE : int = inputs['''image'''].resize((504, 504) ) __SCREAMING_SNAKE_CASE : Optional[int] = '''timbrooks/instruct-pix2pix''' __SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained( lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing() __SCREAMING_SNAKE_CASE : Any = pipe(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = output.images[0] __SCREAMING_SNAKE_CASE : str = image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) __SCREAMING_SNAKE_CASE : str = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
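For context, the pipeline these tests exercise is diffusers' StableDiffusionInstructPix2PixPipeline (the "PixaPix" spelling above is a mangling of "Pix2Pix"). A minimal end-to-end sketch against the same public timbrooks/instruct-pix2pix checkpoint the slow tests load, assuming a CUDA device:

import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None
).to("cuda")
image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
)
edited = pipe(
    "turn him into a cyborg", image=image, num_inference_steps=20, image_guidance_scale=1.0
).images[0]
edited.save("cyborg.png")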
style_context_codestyle: 696
label: 0
__author__ = "Alexander Joslin"

import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized infix expression with Dijkstra's two-stack algorithm."""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: push operands onto the operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators onto the operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: on a closing parenthesis, pop one operator and two operands,
            # apply the operator, and push the result back
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num2, num1)
            operand_stack.push(total)
        # RULE 3: opening parentheses (and whitespace) are ignored

    # RULE 5: the value left on the operand stack is the result
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
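A couple of sanity checks for the evaluator above (note its limits: the expression must be fully parenthesized and operands must be single digits, since the parser walks one character at a time):

assert dijkstras_two_stack_algorithm("(1 + 2)") == 3
assert dijkstras_two_stack_algorithm("((9 / 3) + 4)") == 7  # truediv yields 7.0, which compares equal to 7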
code_codestyle: 73
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion
import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]

        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted DDIM, we are looking into the *next* alphas_cumprod.
        # For the final step, there is no next alphas_cumprod and the index would be out of bounds.
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # (in which case self.step() just outputs the predicted noise)
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_steps is a power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample ("predicted x_0") from predicted noise,
        # formula (12) of https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
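A toy inversion loop driving the scheduler above (the class ships in diffusers as DDIMInverseScheduler; the zero tensor is a stand-in for a real epsilon-predicting UNet call):

import torch
from diffusers import DDIMInverseScheduler

scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=50)
sample = torch.randn(1, 4, 64, 64)
for t in scheduler.timesteps:
    model_output = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(model_output, t, sample).prev_sample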
style_context_codestyle: 696
label: 0
import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def a__ ( snake_case , snake_case ): """simple docstring""" __SCREAMING_SNAKE_CASE : Tuple = XCLIPTextConfig() # derive patch size from model name __SCREAMING_SNAKE_CASE : Tuple = model_name.find('''patch''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = int(model_name[start_idx + len('''patch''' ) : start_idx + len('''patch''' ) + 2] ) __SCREAMING_SNAKE_CASE : Tuple = XCLIPVisionConfig(patch_size=snake_case , num_frames=snake_case ) if "large" in model_name: __SCREAMING_SNAKE_CASE : Optional[Any] = 768 __SCREAMING_SNAKE_CASE : Optional[int] = 3_072 __SCREAMING_SNAKE_CASE : Optional[Any] = 12 __SCREAMING_SNAKE_CASE : Optional[Any] = 1_024 __SCREAMING_SNAKE_CASE : int = 4_096 __SCREAMING_SNAKE_CASE : Tuple = 16 __SCREAMING_SNAKE_CASE : Optional[int] = 24 __SCREAMING_SNAKE_CASE : Optional[int] = 768 __SCREAMING_SNAKE_CASE : Optional[int] = 3_072 if model_name == "xclip-large-patch14-16-frames": __SCREAMING_SNAKE_CASE : Any = 336 __SCREAMING_SNAKE_CASE : Any = XCLIPConfig.from_text_vision_configs(snake_case , snake_case ) if "large" in model_name: __SCREAMING_SNAKE_CASE : Any = 768 return config def a__ ( snake_case ): """simple docstring""" # text encoder if name == "token_embedding.weight": __SCREAMING_SNAKE_CASE : List[str] = name.replace('''token_embedding.weight''' , '''text_model.embeddings.token_embedding.weight''' ) if name == "positional_embedding": __SCREAMING_SNAKE_CASE : List[str] = name.replace('''positional_embedding''' , '''text_model.embeddings.position_embedding.weight''' ) if "ln_1" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''ln_1''' , '''layer_norm1''' ) if "ln_2" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''ln_2''' , '''layer_norm2''' ) if "c_fc" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''c_fc''' , '''fc1''' ) if "c_proj" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''c_proj''' , '''fc2''' ) if name.startswith('''transformer.resblocks''' ): __SCREAMING_SNAKE_CASE : Any = name.replace('''transformer.resblocks''' , '''text_model.encoder.layers''' ) if "attn.out_proj" in name and "message" not in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''attn.out_proj''' , '''self_attn.out_proj''' ) if "ln_final" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''ln_final''' , '''text_model.final_layer_norm''' ) # visual encoder if name == "visual.class_embedding": __SCREAMING_SNAKE_CASE : Optional[Any] = name.replace('''visual.class_embedding''' , '''vision_model.embeddings.class_embedding''' ) if name == "visual.positional_embedding": __SCREAMING_SNAKE_CASE : Tuple = name.replace('''visual.positional_embedding''' , '''vision_model.embeddings.position_embedding.weight''' ) if name.startswith('''visual.transformer.resblocks''' ): __SCREAMING_SNAKE_CASE : List[Any] = name.replace('''visual.transformer.resblocks''' , '''vision_model.encoder.layers''' ) if "visual.conv1" in name: __SCREAMING_SNAKE_CASE : Any = name.replace('''visual.conv1''' , '''vision_model.embeddings.patch_embedding''' ) if "visual.ln_pre" in name: __SCREAMING_SNAKE_CASE : List[str] = name.replace('''visual.ln_pre''' , '''vision_model.pre_layernorm''' ) if "visual.ln_post" in name: __SCREAMING_SNAKE_CASE : Dict = name.replace('''visual.ln_post''' , 
'''vision_model.post_layernorm''' ) if "visual.proj" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''visual.proj''' , '''visual_projection.weight''' ) if "text_projection" in name: __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''text_projection''' , '''text_projection.weight''' ) # things on top if "prompts_visual_proj" in name: __SCREAMING_SNAKE_CASE : str = name.replace('''prompts_visual_proj''' , '''prompts_visual_projection''' ) if "prompts_visual_ln" in name: __SCREAMING_SNAKE_CASE : Optional[int] = name.replace('''prompts_visual_ln''' , '''prompts_visual_layernorm''' ) # mit if name == "mit.positional_embedding": __SCREAMING_SNAKE_CASE : Any = name.replace('''positional''' , '''position''' ) if name.startswith('''mit.resblocks''' ): __SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace('''mit.resblocks''' , '''mit.encoder.layers''' ) # prompts generator if name.startswith('''prompts_generator.norm''' ): __SCREAMING_SNAKE_CASE : Tuple = name.replace('''prompts_generator.norm''' , '''prompts_generator.layernorm''' ) return name def a__ ( snake_case , snake_case ): """simple docstring""" for key in orig_state_dict.copy().keys(): __SCREAMING_SNAKE_CASE : Tuple = orig_state_dict.pop(snake_case ) if "attn.in_proj" in key: __SCREAMING_SNAKE_CASE : Optional[Any] = key.split('''.''' ) if key.startswith('''visual''' ): __SCREAMING_SNAKE_CASE : List[Any] = key_split[3] __SCREAMING_SNAKE_CASE : Any = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: __SCREAMING_SNAKE_CASE : Union[str, Any] = val[ :dim, : ] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : Tuple = val[ -dim:, : ] else: __SCREAMING_SNAKE_CASE : Optional[Any] = val[ :dim ] __SCREAMING_SNAKE_CASE : Tuple = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Tuple = val[ -dim: ] else: if "weight" in key: __SCREAMING_SNAKE_CASE : Tuple = val[ :dim, : ] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : str = val[ -dim:, : ] else: __SCREAMING_SNAKE_CASE : Dict = val[:dim] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : Tuple = val[-dim:] elif key.startswith('''mit''' ): __SCREAMING_SNAKE_CASE : List[str] = key_split[2] __SCREAMING_SNAKE_CASE : Union[str, Any] = config.vision_config.mit_hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE : str = val[:dim, :] __SCREAMING_SNAKE_CASE : Tuple = val[dim : dim * 2, :] __SCREAMING_SNAKE_CASE : Optional[int] = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Any = val[:dim] __SCREAMING_SNAKE_CASE : Any = val[dim : dim * 2] __SCREAMING_SNAKE_CASE : Optional[Any] = val[-dim:] else: __SCREAMING_SNAKE_CASE : Optional[Any] = key_split[2] __SCREAMING_SNAKE_CASE : Any = config.text_config.hidden_size if "weight" in key: __SCREAMING_SNAKE_CASE : Tuple = val[:dim, :] __SCREAMING_SNAKE_CASE : int = val[ dim : dim * 2, : ] __SCREAMING_SNAKE_CASE : Dict = val[-dim:, :] else: __SCREAMING_SNAKE_CASE : Tuple = val[:dim] __SCREAMING_SNAKE_CASE : str = val[ dim : dim * 2 ] __SCREAMING_SNAKE_CASE : int = val[-dim:] else: __SCREAMING_SNAKE_CASE : int = rename_key(snake_case ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: __SCREAMING_SNAKE_CASE : int = val.T __SCREAMING_SNAKE_CASE : Union[str, Any] = val return orig_state_dict def a__ ( snake_case ): """simple docstring""" if num_frames == 8: __SCREAMING_SNAKE_CASE : List[Any] = '''eating_spaghetti_8_frames.npy''' elif num_frames == 16: __SCREAMING_SNAKE_CASE : Tuple = '''eating_spaghetti.npy''' 
elif num_frames == 32: __SCREAMING_SNAKE_CASE : Dict = '''eating_spaghetti_32_frames.npy''' __SCREAMING_SNAKE_CASE : List[str] = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename=snake_case , repo_type='''dataset''' , ) __SCREAMING_SNAKE_CASE : int = np.load(snake_case ) return list(snake_case ) def a__ ( snake_case , snake_case=None , snake_case=False ): """simple docstring""" __SCREAMING_SNAKE_CASE : Dict = { # fully supervised kinetics-400 checkpoints '''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''', '''xclip-base-patch32-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth''' ), '''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''', '''xclip-base-patch16-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth''' ), '''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb''', '''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f''', # fully supervised kinetics-600 checkpoints '''xclip-base-patch16-kinetics-600''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth''' ), '''xclip-base-patch16-kinetics-600-16-frames''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth''' ), '''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be''', # few shot '''xclip-base-patch16-hmdb-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth''' ), '''xclip-base-patch16-hmdb-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth''' ), '''xclip-base-patch16-hmdb-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth''' ), '''xclip-base-patch16-hmdb-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth''' ), '''xclip-base-patch16-ucf-2-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth''' ), '''xclip-base-patch16-ucf-4-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth''' ), '''xclip-base-patch16-ucf-8-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth''' ), '''xclip-base-patch16-ucf-16-shot''': ( '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth''' ), # zero shot '''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''', } __SCREAMING_SNAKE_CASE : Optional[Any] = model_to_url[model_name] __SCREAMING_SNAKE_CASE : Any = 8 if "16-frames" in model_name: __SCREAMING_SNAKE_CASE : Optional[int] = 16 elif "shot" in model_name: __SCREAMING_SNAKE_CASE : Optional[Any] = 32 __SCREAMING_SNAKE_CASE : List[str] = get_xclip_config(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Tuple = XCLIPModel(snake_case ) model.eval() if "drive" in checkpoint_url: __SCREAMING_SNAKE_CASE : Union[str, Any] = '''pytorch_model.bin''' gdown.cached_download(snake_case , 
snake_case , quiet=snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.load(snake_case , map_location='''cpu''' )['''model'''] else: __SCREAMING_SNAKE_CASE : str = torch.hub.load_state_dict_from_url(snake_case )['''model'''] __SCREAMING_SNAKE_CASE : List[Any] = convert_state_dict(snake_case , snake_case ) __SCREAMING_SNAKE_CASE : Union[str, Any] = XCLIPModel(snake_case ) __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE : Any = model.load_state_dict(snake_case , strict=snake_case ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() __SCREAMING_SNAKE_CASE : Any = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224 __SCREAMING_SNAKE_CASE : str = VideoMAEImageProcessor(size=snake_case ) __SCREAMING_SNAKE_CASE : int = CLIPTokenizer.from_pretrained('''openai/clip-vit-base-patch32''' ) __SCREAMING_SNAKE_CASE : Optional[int] = CLIPTokenizerFast.from_pretrained('''openai/clip-vit-base-patch32''' ) __SCREAMING_SNAKE_CASE : List[Any] = XCLIPProcessor(image_processor=snake_case , tokenizer=snake_case ) __SCREAMING_SNAKE_CASE : Dict = prepare_video(snake_case ) __SCREAMING_SNAKE_CASE : List[str] = processor( text=['''playing sports''', '''eating spaghetti''', '''go shopping'''] , videos=snake_case , return_tensors='''pt''' , padding=snake_case ) print('''Shape of pixel values:''' , inputs.pixel_values.shape ) with torch.no_grad(): __SCREAMING_SNAKE_CASE : Optional[Any] = model(**snake_case ) # Verify outputs __SCREAMING_SNAKE_CASE : Dict = outputs.logits_per_video __SCREAMING_SNAKE_CASE : Tuple = logits_per_video.softmax(dim=1 ) print('''Probs:''' , snake_case ) # kinetics-400 if model_name == "xclip-base-patch32": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[0.0019, 0.9951, 0.0030]] ) elif model_name == "xclip-base-patch32-16-frames": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[7.0999E-04, 9.9883E-01, 4.5580E-04]] ) elif model_name == "xclip-base-patch16": __SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.0083, 0.9681, 0.0236]] ) elif model_name == "xclip-base-patch16-16-frames": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[7.6937E-04, 9.9728E-01, 1.9473E-03]] ) elif model_name == "xclip-large-patch14": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0062, 0.9864, 0.0075]] ) elif model_name == "xclip-large-patch14-16-frames": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[3.3877E-04, 9.9937E-01, 2.8888E-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[0.0555, 0.8914, 0.0531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([[3.8554E-04, 9.9929E-01, 3.2754E-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": __SCREAMING_SNAKE_CASE : List[str] = torch.tensor([[0.0036, 0.9920, 0.0045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": __SCREAMING_SNAKE_CASE : str = torch.tensor([[7.1890E-06, 9.9994E-01, 5.6559E-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": __SCREAMING_SNAKE_CASE : int = torch.tensor([[1.0320E-05, 9.9993E-01, 6.2435E-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[4.1377E-06, 9.9990E-01, 9.8386E-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": __SCREAMING_SNAKE_CASE : Dict = torch.tensor([[4.1347E-05, 9.9962E-01, 3.3411E-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": 
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": __SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor([[8.5857E-05, 9.9928E-01, 6.3291E-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([[0.0027, 0.9904, 0.0070]] ) elif model_name == "xclip-base-patch16-ucf-16-shot": __SCREAMING_SNAKE_CASE : Tuple = torch.tensor([[9.8219E-04, 9.9593E-01, 3.0863E-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": __SCREAMING_SNAKE_CASE : List[Any] = torch.tensor([[3.5082E-04, 9.9785E-01, 1.7966E-03]] ) else: raise ValueError(F'''Model name {model_name} not supported''' ) assert torch.allclose(snake_case , snake_case , atol=1E-3 ) print('''Looks ok!''' ) if pytorch_dump_folder_path is not None: print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(snake_case ) if push_to_hub: print('''Pushing model, processor and slow tokenizer files to the hub...''' ) model.push_to_hub(snake_case , organization='''nielsr''' ) processor.push_to_hub(snake_case , organization='''nielsr''' ) slow_tokenizer.push_to_hub(snake_case , organization='''nielsr''' ) if __name__ == "__main__": lowercase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""xclip-base-patch32""", type=str, help="""Name of the model.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub.""" ) lowercase_ = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
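Once converted and pushed, a checkpoint is consumed through the same X-CLIP classes the script verifies with. A hedged sketch against the published microsoft/xclip-base-patch32 checkpoint, with random dummy frames standing in for a real clip:

import numpy as np
from transformers import XCLIPModel, XCLIPProcessor

processor = XCLIPProcessor.from_pretrained("microsoft/xclip-base-patch32")
model = XCLIPModel.from_pretrained("microsoft/xclip-base-patch32")
video = list(np.random.rand(8, 224, 224, 3))  # 8 dummy RGB frames
inputs = processor(
    text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
)
print(model(**inputs).logits_per_video.softmax(dim=1))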
code_codestyle: 74
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
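A quick check of the column mapping defined above (standalone sketch; inside datasets this class is exposed as the summarization task template):

template = Summarization(text_column="article", summary_column="highlights")
assert template.column_mapping == {"article": "text", "highlights": "summary"}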
style_context_codestyle: 696
label: 0
import json
import sys


def format_json_to_md(input_json_file, output_md_file):
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]

    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")

        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)

            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"

            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"

        output_md += [title, lines, value, " "]

    output_md.append("</details>")

    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]

    format_json_to_md(input_json_file, output_md_file)
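Typical invocation (hypothetical file names; the script expects the input JSON to map benchmark paths to per-metric dicts with "new" and optional "old"/"diff" entries):

python format.py benchmark_results.json benchmark_report.md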
code_codestyle: 75
def solution(max_perimeter: int = 10**9) -> int:
    """Sum all perimeters up to `max_perimeter` generated by the recurrence below."""
    prev_value = 1
    value = 2

    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter

        prev_value += 2 * value
        value += prev_value

        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1

    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
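The iteration above appears to enumerate, via a Pell-style recurrence, the perimeters of "almost equilateral" triangles (integer sides a, a, a±1 with integer area). A brute-force cross-check under that assumption, using exact integer arithmetic for the Heron area test:

import math


def brute_force(max_perimeter: int) -> int:
    total = 0
    for a in range(2, max_perimeter // 3 + 2):
        for c in (a - 1, a + 1):
            perimeter = 2 * a + c
            if c <= 0 or perimeter > max_perimeter:
                continue
            t = c * c * (4 * a * a - c * c)  # equals (4 * area)^2 by Heron's formula
            s = math.isqrt(t)
            if s > 0 and s * s == t and s % 4 == 0:
                total += perimeter
    return total


assert brute_force(1000) == solution(1000) == 984  # perimeters 16 + 50 + 196 + 722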
style_context_codestyle: 696
label: 0
"""simple docstring""" import gc import unittest import numpy as np import torch import torch.nn.functional as F from transformers import ( ClapTextConfig, ClapTextModelWithProjection, RobertaTokenizer, SpeechTaHifiGan, SpeechTaHifiGanConfig, ) from diffusers import ( AudioLDMPipeline, AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel, ) from diffusers.utils import is_xformers_available, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class UpperCAmelCase_ ( snake_case , unittest.TestCase ): UpperCamelCase =AudioLDMPipeline UpperCamelCase =TEXT_TO_AUDIO_PARAMS UpperCamelCase =TEXT_TO_AUDIO_BATCH_PARAMS UpperCamelCase =frozenset( [ "num_inference_steps", "num_waveforms_per_prompt", "generator", "latents", "output_type", "return_dict", "callback", "callback_steps", ] ) def _lowerCamelCase ( self ) -> List[str]: torch.manual_seed(0 ) __lowercase : Tuple = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=(32, 64) , class_embed_type='''simple_projection''' , projection_class_embeddings_input_dim=32 , class_embeddings_concat=UpperCamelCase_ , ) __lowercase : List[Any] = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=UpperCamelCase_ , set_alpha_to_one=UpperCamelCase_ , ) torch.manual_seed(0 ) __lowercase : int = AutoencoderKL( block_out_channels=[32, 64] , in_channels=1 , out_channels=1 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) __lowercase : List[Any] = ClapTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , projection_dim=32 , ) __lowercase : Optional[int] = ClapTextModelWithProjection(UpperCamelCase_ ) __lowercase : Dict = RobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-roberta''' , model_max_length=77 ) __lowercase : Union[str, Any] = SpeechTaHifiGanConfig( model_in_dim=8 , sampling_rate=1_60_00 , upsample_initial_channel=16 , upsample_rates=[2, 2] , upsample_kernel_sizes=[4, 4] , resblock_kernel_sizes=[3, 7] , resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]] , normalize_before=UpperCamelCase_ , ) __lowercase : Union[str, Any] = SpeechTaHifiGan(UpperCamelCase_ ) __lowercase : Dict = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''vocoder''': vocoder, } return components def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_=0 ) -> Any: if str(UpperCamelCase_ ).startswith('''mps''' ): __lowercase : Union[str, Any] = torch.manual_seed(UpperCamelCase_ ) else: __lowercase : List[Any] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) __lowercase : Optional[Any] = { '''prompt''': '''A hammer hitting a wooden surface''', '''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, } return inputs def _lowerCamelCase ( self ) -> Union[str, Any]: __lowercase : 
Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowercase : str = self.get_dummy_components() __lowercase : Tuple = AudioLDMPipeline(**UpperCamelCase_ ) __lowercase : Optional[int] = audioldm_pipe.to(UpperCamelCase_ ) audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __lowercase : Any = self.get_dummy_inputs(UpperCamelCase_ ) __lowercase : Optional[Any] = audioldm_pipe(**UpperCamelCase_ ) __lowercase : Any = output.audios[0] assert audio.ndim == 1 assert len(UpperCamelCase_ ) == 2_56 __lowercase : Dict = audio[:10] __lowercase : Any = np.array( [-0.0_0_5_0, 0.0_0_5_0, -0.0_0_6_0, 0.0_0_3_3, -0.0_0_2_6, 0.0_0_3_3, -0.0_0_2_7, 0.0_0_3_3, -0.0_0_2_8, 0.0_0_3_3] ) assert np.abs(audio_slice - expected_slice ).max() < 1E-2 def _lowerCamelCase ( self ) -> Optional[Any]: __lowercase : Optional[int] = self.get_dummy_components() __lowercase : Dict = AudioLDMPipeline(**UpperCamelCase_ ) __lowercase : Tuple = audioldm_pipe.to(UpperCamelCase_ ) __lowercase : Any = audioldm_pipe.to(UpperCamelCase_ ) audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __lowercase : Optional[Any] = self.get_dummy_inputs(UpperCamelCase_ ) __lowercase : Any = 3 * [inputs['''prompt''']] # forward __lowercase : Dict = audioldm_pipe(**UpperCamelCase_ ) __lowercase : int = output.audios[0] __lowercase : int = self.get_dummy_inputs(UpperCamelCase_ ) __lowercase : Optional[int] = 3 * [inputs.pop('''prompt''' )] __lowercase : Optional[Any] = audioldm_pipe.tokenizer( UpperCamelCase_ , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=UpperCamelCase_ , return_tensors='''pt''' , ) __lowercase : Tuple = text_inputs['''input_ids'''].to(UpperCamelCase_ ) __lowercase : List[Any] = audioldm_pipe.text_encoder( UpperCamelCase_ , ) __lowercase : Any = prompt_embeds.text_embeds # additional L_2 normalization over each hidden-state __lowercase : Tuple = F.normalize(UpperCamelCase_ , dim=-1 ) __lowercase : Optional[int] = prompt_embeds # forward __lowercase : Any = audioldm_pipe(**UpperCamelCase_ ) __lowercase : str = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1E-2 def _lowerCamelCase ( self ) -> Optional[Any]: __lowercase : List[Any] = self.get_dummy_components() __lowercase : Tuple = AudioLDMPipeline(**UpperCamelCase_ ) __lowercase : Dict = audioldm_pipe.to(UpperCamelCase_ ) __lowercase : Union[str, Any] = audioldm_pipe.to(UpperCamelCase_ ) audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __lowercase : Optional[Any] = self.get_dummy_inputs(UpperCamelCase_ ) __lowercase : Dict = 3 * ['''this is a negative prompt'''] __lowercase : Optional[int] = negative_prompt __lowercase : Tuple = 3 * [inputs['''prompt''']] # forward __lowercase : int = audioldm_pipe(**UpperCamelCase_ ) __lowercase : Optional[int] = output.audios[0] __lowercase : Tuple = self.get_dummy_inputs(UpperCamelCase_ ) __lowercase : int = 3 * [inputs.pop('''prompt''' )] __lowercase : Optional[int] = [] for p in [prompt, negative_prompt]: __lowercase : List[Any] = audioldm_pipe.tokenizer( UpperCamelCase_ , padding='''max_length''' , max_length=audioldm_pipe.tokenizer.model_max_length , truncation=UpperCamelCase_ , return_tensors='''pt''' , ) __lowercase : Tuple = text_inputs['''input_ids'''].to(UpperCamelCase_ ) __lowercase : int = audioldm_pipe.text_encoder( UpperCamelCase_ , ) __lowercase : int = text_embeds.text_embeds # additional L_2 normalization over each hidden-state __lowercase : str = F.normalize(UpperCamelCase_ , dim=-1 ) 
embeds.append(UpperCamelCase_ ) __lowercase ,__lowercase : Union[str, Any] = embeds # forward __lowercase : int = audioldm_pipe(**UpperCamelCase_ ) __lowercase : List[str] = output.audios[0] assert np.abs(audio_a - audio_a ).max() < 1E-2 def _lowerCamelCase ( self ) -> Union[str, Any]: __lowercase : Dict = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowercase : Any = self.get_dummy_components() __lowercase : Optional[Any] = PNDMScheduler(skip_prk_steps=UpperCamelCase_ ) __lowercase : Tuple = AudioLDMPipeline(**UpperCamelCase_ ) __lowercase : Tuple = audioldm_pipe.to(UpperCamelCase_ ) audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __lowercase : Union[str, Any] = self.get_dummy_inputs(UpperCamelCase_ ) __lowercase : Dict = '''egg cracking''' __lowercase : int = audioldm_pipe(**UpperCamelCase_ , negative_prompt=UpperCamelCase_ ) __lowercase : Union[str, Any] = output.audios[0] assert audio.ndim == 1 assert len(UpperCamelCase_ ) == 2_56 __lowercase : str = audio[:10] __lowercase : Any = np.array( [-0.0_0_5_1, 0.0_0_5_0, -0.0_0_6_0, 0.0_0_3_4, -0.0_0_2_6, 0.0_0_3_3, -0.0_0_2_7, 0.0_0_3_3, -0.0_0_2_8, 0.0_0_3_2] ) assert np.abs(audio_slice - expected_slice ).max() < 1E-2 def _lowerCamelCase ( self ) -> Dict: __lowercase : str = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowercase : Tuple = self.get_dummy_components() __lowercase : str = PNDMScheduler(skip_prk_steps=UpperCamelCase_ ) __lowercase : Dict = AudioLDMPipeline(**UpperCamelCase_ ) __lowercase : Tuple = audioldm_pipe.to(UpperCamelCase_ ) audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __lowercase : Any = '''A hammer hitting a wooden surface''' # test num_waveforms_per_prompt=1 (default) __lowercase : Union[str, Any] = audioldm_pipe(UpperCamelCase_ , num_inference_steps=2 ).audios assert audios.shape == (1, 2_56) # test num_waveforms_per_prompt=1 (default) for batch of prompts __lowercase : Union[str, Any] = 2 __lowercase : List[Any] = audioldm_pipe([prompt] * batch_size , num_inference_steps=2 ).audios assert audios.shape == (batch_size, 2_56) # test num_waveforms_per_prompt for single prompt __lowercase : Tuple = 2 __lowercase : int = audioldm_pipe(UpperCamelCase_ , num_inference_steps=2 , num_waveforms_per_prompt=UpperCamelCase_ ).audios assert audios.shape == (num_waveforms_per_prompt, 2_56) # test num_waveforms_per_prompt for batch of prompts __lowercase : str = 2 __lowercase : int = audioldm_pipe( [prompt] * batch_size , num_inference_steps=2 , num_waveforms_per_prompt=UpperCamelCase_ ).audios assert audios.shape == (batch_size * num_waveforms_per_prompt, 2_56) def _lowerCamelCase ( self ) -> List[Any]: __lowercase : Tuple = '''cpu''' # ensure determinism for the device-dependent torch.Generator __lowercase : str = self.get_dummy_components() __lowercase : List[Any] = AudioLDMPipeline(**UpperCamelCase_ ) __lowercase : Optional[int] = audioldm_pipe.to(UpperCamelCase_ ) audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __lowercase : List[Any] = audioldm_pipe.vocoder.config.sampling_rate __lowercase : Optional[Any] = self.get_dummy_inputs(UpperCamelCase_ ) __lowercase : List[str] = audioldm_pipe(audio_length_in_s=0.0_1_6 , **UpperCamelCase_ ) __lowercase : Optional[int] = output.audios[0] assert audio.ndim == 1 assert len(UpperCamelCase_ ) / vocoder_sampling_rate == 0.0_1_6 __lowercase : int = audioldm_pipe(audio_length_in_s=0.0_3_2 , **UpperCamelCase_ ) __lowercase : Any = output.audios[0] assert audio.ndim == 1 assert 
len(UpperCamelCase_ ) / vocoder_sampling_rate == 0.0_3_2 def _lowerCamelCase ( self ) -> Optional[int]: __lowercase : str = self.get_dummy_components() __lowercase : int = AudioLDMPipeline(**UpperCamelCase_ ) __lowercase : Optional[Any] = audioldm_pipe.to(UpperCamelCase_ ) audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __lowercase : Optional[Any] = ['''hey'''] __lowercase : List[Any] = audioldm_pipe(UpperCamelCase_ , num_inference_steps=1 ) __lowercase : Optional[Any] = output.audios.shape assert audio_shape == (1, 2_56) __lowercase : Optional[Any] = audioldm_pipe.vocoder.config config.model_in_dim *= 2 __lowercase : List[Any] = SpeechTaHifiGan(UpperCamelCase_ ).to(UpperCamelCase_ ) __lowercase : Union[str, Any] = audioldm_pipe(UpperCamelCase_ , num_inference_steps=1 ) __lowercase : Optional[int] = output.audios.shape # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram assert audio_shape == (1, 2_56) def _lowerCamelCase ( self ) -> Dict: self._test_attention_slicing_forward_pass(test_mean_pixel_difference=UpperCamelCase_ ) def _lowerCamelCase ( self ) -> Optional[Any]: self._test_inference_batch_single_identical(test_mean_pixel_difference=UpperCamelCase_ ) @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def _lowerCamelCase ( self ) -> Optional[Any]: self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=UpperCamelCase_ ) @slow class UpperCAmelCase_ ( unittest.TestCase ): def _lowerCamelCase ( self ) -> Dict: super().tearDown() gc.collect() torch.cuda.empty_cache() def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_="cpu" , UpperCamelCase_=torch.floataa , UpperCamelCase_=0 ) -> Any: __lowercase : List[str] = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ ) __lowercase : Optional[int] = np.random.RandomState(UpperCamelCase_ ).standard_normal((1, 8, 1_28, 16) ) __lowercase : List[str] = torch.from_numpy(UpperCamelCase_ ).to(device=UpperCamelCase_ , dtype=UpperCamelCase_ ) __lowercase : str = { '''prompt''': '''A hammer hitting a wooden surface''', '''latents''': latents, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 2.5, } return inputs def _lowerCamelCase ( self ) -> Dict: __lowercase : Tuple = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' ) __lowercase : Optional[Any] = audioldm_pipe.to(UpperCamelCase_ ) audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __lowercase : List[Any] = self.get_inputs(UpperCamelCase_ ) __lowercase : Union[str, Any] = 25 __lowercase : Dict = audioldm_pipe(**UpperCamelCase_ ).audios[0] assert audio.ndim == 1 assert len(UpperCamelCase_ ) == 8_19_20 __lowercase : Optional[int] = audio[7_72_30:7_72_40] __lowercase : Tuple = np.array( [-0.4_8_8_4, -0.4_6_0_7, 0.0_0_2_3, 0.5_0_0_7, 0.5_8_9_6, 0.5_1_5_1, 0.3_8_1_3, -0.0_2_0_8, -0.3_6_8_7, -0.4_3_1_5] ) __lowercase : Union[str, Any] = np.abs(expected_slice - audio_slice ).max() assert max_diff < 1E-2 def _lowerCamelCase ( self ) -> Union[str, Any]: __lowercase : List[str] = AudioLDMPipeline.from_pretrained('''cvssp/audioldm''' ) __lowercase : str = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config ) __lowercase : Optional[int] = audioldm_pipe.to(UpperCamelCase_ ) audioldm_pipe.set_progress_bar_config(disable=UpperCamelCase_ ) __lowercase : Any = self.get_inputs(UpperCamelCase_ ) __lowercase : List[Any] = 
audioldm_pipe(**UpperCamelCase_ ).audios[0] assert audio.ndim == 1 assert len(UpperCamelCase_ ) == 8_19_20 __lowercase : int = audio[2_77_80:2_77_90] __lowercase : Dict = np.array([-0.2_1_3_1, -0.0_8_7_3, -0.0_1_2_4, -0.0_1_8_9, 0.0_5_6_9, 0.1_3_7_3, 0.1_8_8_3, 0.2_8_8_6, 0.3_2_9_7, 0.2_2_1_2] ) __lowercase : Any = np.abs(expected_slice - audio_slice ).max() assert max_diff < 3E-2
76
def abbr(a: str, b: str) -> bool:
    """Return True if string `a` can be abbreviated to string `b` by
    capitalizing some of its lowercase letters and deleting the rest.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    # dp[i][j]: the first i characters of `a` can produce the first j characters of `b`
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True  # capitalize a[i] to match b[j]
                if a[i].islower():
                    dp[i + 1][j] = True  # delete the lowercase a[i]
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
696
0
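A quick usage check for the abbreviation DP above, assuming the abbr function defined there:

# Two calls exercising both branches of the DP transition.
print(abbr("daBcd", "ABC"))  # True: capitalize 'a' and 'c', delete the remaining lowercase letters
print(abbr("dBcd", "ABC"))   # False: no 'a' is available to capitalize before 'B'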
"""simple docstring""" import argparse from argparse import Namespace import torch from torch import nn from transformers import XGLMConfig, XGLMForCausalLM def _UpperCamelCase ( UpperCamelCase ) -> Tuple: """simple docstring""" __UpperCAmelCase : int = [ "decoder.version", "decoder.output_projection.weight", "_float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(UpperCamelCase , UpperCamelCase ) def _UpperCamelCase ( UpperCamelCase ) -> List[Any]: """simple docstring""" __UpperCAmelCase , __UpperCAmelCase : Any = emb.weight.shape __UpperCAmelCase : Dict = nn.Linear(UpperCamelCase , UpperCamelCase , bias=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = emb.weight.data return lin_layer def _UpperCamelCase ( UpperCamelCase ) -> Dict: """simple docstring""" __UpperCAmelCase : Tuple = torch.load(UpperCamelCase , map_location="cpu" ) __UpperCAmelCase : Tuple = Namespace(**checkpoint["cfg"]["model"] ) __UpperCAmelCase : str = checkpoint["model"] remove_ignore_keys_(UpperCamelCase ) __UpperCAmelCase : int = state_dict["decoder.embed_tokens.weight"].shape[0] __UpperCAmelCase : Optional[int] = {key.replace("decoder" , "model" ): val for key, val in state_dict.items()} __UpperCAmelCase : Any = XGLMConfig( vocab_size=UpperCamelCase , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="gelu" , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , ) __UpperCAmelCase : Union[str, Any] = XGLMForCausalLM(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model.load_state_dict(UpperCamelCase , strict=UpperCamelCase ) print(UpperCamelCase ) __UpperCAmelCase : int = make_linear_from_emb(model.model.embed_tokens ) return model if __name__ == "__main__": A = argparse.ArgumentParser() # Required parameters parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""") parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") A = parser.parse_args() A = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
77
from scipy.stats import pearsonr import datasets __lowerCAmelCase : str ='\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n' __lowerCAmelCase : Tuple ='\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n' __lowerCAmelCase : Optional[int] ='\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. 
and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _lowercase ( datasets.Metric ): '''simple docstring''' def __magic_name__( self :Optional[int] ) -> Optional[int]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float''' ), '''references''': datasets.Value('''float''' ), } ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def __magic_name__( self :Tuple , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple=False ) -> int: if return_pvalue: __SCREAMING_SNAKE_CASE : int = pearsonr(lowerCAmelCase__ , lowerCAmelCase__ ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(lowerCAmelCase__ , lowerCAmelCase__ )[0] )}
696
0
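The make_linear_from_emb helper in the XGLM conversion script above turns the token-embedding matrix into a tied LM head. A minimal sketch of that weight-tying pattern, with hypothetical shapes chosen only for illustration:

import torch
from torch import nn

emb = nn.Embedding(num_embeddings=32, embedding_dim=8)  # hypothetical vocab size / hidden dim
vocab_size, emb_size = emb.weight.shape
lm_head = nn.Linear(emb_size, vocab_size, bias=False)
lm_head.weight.data = emb.weight.data  # output projection shares the embedding weights
assert torch.equal(lm_head.weight, emb.weight)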
from PIL import Image


def mean_threshold(image: Image.Image) -> Image.Image:
    """Binarize a greyscale image around the mean of its pixel values."""
    width, height = image.size
    pixels = image.load()  # pixel-access object, indexed as pixels[x, y]
    mean = 0
    for x in range(width):
        for y in range(height):
            mean += pixels[x, y]
    mean //= width * height
    for x in range(width):
        for y in range(height):
            pixels[x, y] = 255 if pixels[x, y] > mean else 0
    return image


if __name__ == "__main__":
    image = mean_threshold(Image.open("path_to_image").convert("L"))
    image.save("output_image_path")
78
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mobilebert import MobileBertTokenizer __lowerCAmelCase : List[str] =logging.get_logger(__name__) __lowerCAmelCase : int ={'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} __lowerCAmelCase : int ={ 'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'}, 'tokenizer_file': { 'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json' }, } __lowerCAmelCase : Optional[int] ={'mobilebert-uncased': 5_1_2} __lowerCAmelCase : Union[str, Any] ={} class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ : List[Any] = MobileBertTokenizer def __init__( self :Tuple , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]="[UNK]" , lowerCAmelCase__ :List[Any]="[SEP]" , lowerCAmelCase__ :List[Any]="[PAD]" , lowerCAmelCase__ :List[Any]="[CLS]" , lowerCAmelCase__ :Any="[MASK]" , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Tuple=None , **lowerCAmelCase__ :List[str] , ) -> Optional[Any]: super().__init__( lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , ) __SCREAMING_SNAKE_CASE : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , lowerCAmelCase__ ) != do_lower_case or normalizer_state.get('''strip_accents''' , lowerCAmelCase__ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase__ ) != tokenize_chinese_chars ): __SCREAMING_SNAKE_CASE : int = getattr(lowerCAmelCase__ , normalizer_state.pop('''type''' ) ) __SCREAMING_SNAKE_CASE : Optional[int] = do_lower_case __SCREAMING_SNAKE_CASE : str = strip_accents __SCREAMING_SNAKE_CASE : Dict = tokenize_chinese_chars __SCREAMING_SNAKE_CASE : Union[str, Any] = normalizer_class(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Any = do_lower_case def __magic_name__( self :Optional[int] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any]=None ) -> Tuple: __SCREAMING_SNAKE_CASE : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __magic_name__( self :List[str] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]: __SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id] __SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]: __SCREAMING_SNAKE_CASE : int = self._tokenizer.model.save(lowerCAmelCase__ , 
name=lowerCAmelCase__ ) return tuple(lowerCAmelCase__ )
696
0
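A self-contained check of the mean_threshold function above, using a synthetic 4x4 gradient instead of a file on disk:

from PIL import Image

img = Image.new("L", (4, 4))
img.putdata(list(range(0, 256, 16)))  # 16 pixels: 0, 16, ..., 240; integer mean is 120
binary = mean_threshold(img)
print(sorted(set(binary.getdata())))  # [0, 255]: every pixel is pushed to one of the two extremes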
import timeit import numpy as np import datasets from datasets.arrow_writer import ArrowWriter from datasets.features.features import _ArrayXD def _lowerCamelCase ( __lowerCamelCase ) -> List[Any]: '''simple docstring''' def wrapper(*__lowerCamelCase , **__lowerCamelCase ): UpperCAmelCase__ : str = timeit.default_timer() UpperCAmelCase__ : Union[str, Any] = func(*__lowerCamelCase , **__lowerCamelCase ) UpperCAmelCase__ : int = timeit.default_timer() - starttime return delta UpperCAmelCase__ : Dict = func.__name__ return wrapper def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase=100 , __lowerCamelCase=None ) -> Any: '''simple docstring''' UpperCAmelCase__ : List[Any] = [] UpperCAmelCase__ : Dict = seq_shapes or {} for i in range(__lowerCamelCase ): UpperCAmelCase__ : Optional[int] = {} for col_id, (k, v) in enumerate(features.items() ): if isinstance(__lowerCamelCase , _ArrayXD ): UpperCAmelCase__ : Tuple = np.random.rand(*v.shape ).astype(v.dtype ) elif isinstance(__lowerCamelCase , datasets.Value ): if v.dtype == "string": UpperCAmelCase__ : Optional[int] = """The small grey turtle was surprisingly fast when challenged.""" else: UpperCAmelCase__ : Union[str, Any] = np.random.randint(10 , size=1 ).astype(v.dtype ).item() elif isinstance(__lowerCamelCase , datasets.Sequence ): while isinstance(__lowerCamelCase , datasets.Sequence ): UpperCAmelCase__ : str = v.feature UpperCAmelCase__ : str = seq_shapes[k] UpperCAmelCase__ : Any = np.random.rand(*__lowerCamelCase ).astype(v.dtype ) UpperCAmelCase__ : Any = data dummy_data.append((i, example) ) return dummy_data def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase=100 , __lowerCamelCase=None ) -> str: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = generate_examples(__lowerCamelCase , num_examples=__lowerCamelCase , seq_shapes=__lowerCamelCase ) with ArrowWriter(features=__lowerCamelCase , path=__lowerCamelCase ) as writer: for key, record in dummy_data: UpperCAmelCase__ : List[Any] = features.encode_example(__lowerCamelCase ) writer.write(__lowerCamelCase ) UpperCAmelCase__ , UpperCAmelCase__ : str = writer.finalize() if not num_final_examples == num_examples: raise ValueError( F"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}." ) UpperCAmelCase__ : Any = datasets.Dataset.from_file(filename=__lowerCamelCase , info=datasets.DatasetInfo(features=__lowerCamelCase ) ) return dataset
79
import os


def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0
    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
                )
            # Right-to-left diagonal (/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j] * grid[i - 1][j + 1] * grid[i - 2][j + 2] * grid[i - 3][j + 3]
                )
            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product
    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))
    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)


if __name__ == "__main__":
    print(solution())
696
0
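The benchmark helpers above (all obfuscated to _lowerCamelCase) mirror the datasets-repo benchmarking utilities: a timing decorator, a dummy-example generator, and an ArrowWriter-backed dataset builder. A sketch of driving the final builder, under the assumption that its signature is (dataset_path, features, num_examples, seq_shapes) and giving it the illustrative name generate_example_dataset:

import os
import tempfile

import datasets

features = datasets.Features({"text": datasets.Value("string"), "score": datasets.Value("float32")})
path = os.path.join(tempfile.mkdtemp(), "bench.arrow")
dataset = generate_example_dataset(path, features, num_examples=10)  # name and argument order are assumptions
print(len(dataset))  # 10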
import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset __UpperCamelCase : Optional[Any] = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class __UpperCamelCase ( nn.Module ): def __init__( self : int , _lowerCAmelCase : Dict ) -> Optional[Any]: """simple docstring""" super().__init__() __lowercase = torchvision.models.resnetaaa(pretrained=_lowerCAmelCase ) __lowercase = list(model.children() )[:-2] __lowercase = nn.Sequential(*_lowerCAmelCase ) __lowercase = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def _a ( self : str , _lowerCAmelCase : List[str] ) -> int: """simple docstring""" __lowercase = self.pool(self.model(_lowerCAmelCase ) ) __lowercase = torch.flatten(_lowerCAmelCase , start_dim=2 ) __lowercase = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class __UpperCamelCase ( _lowerCAmelCase ): def __init__( self : int , _lowerCAmelCase : Any , _lowerCAmelCase : Tuple , _lowerCAmelCase : Any , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any ) -> List[str]: """simple docstring""" __lowercase = [json.loads(_lowerCAmelCase ) for l in open(_lowerCAmelCase )] __lowercase = os.path.dirname(_lowerCAmelCase ) __lowercase = tokenizer __lowercase = labels __lowercase = len(_lowerCAmelCase ) __lowercase = max_seq_length __lowercase = transforms def __len__( self : int ) -> Tuple: """simple docstring""" return len(self.data ) def __getitem__( self : Any , _lowerCAmelCase : Any ) -> str: """simple docstring""" __lowercase = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=_lowerCAmelCase ) ) __lowercase , __lowercase , __lowercase = sentence[0], sentence[1:-1], sentence[-1] __lowercase = sentence[: self.max_seq_length] __lowercase = torch.zeros(self.n_classes ) __lowercase = 1 __lowercase = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" ) __lowercase = self.transforms(_lowerCAmelCase ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def _a ( self : List[Any] ) -> Any: """simple docstring""" __lowercase = Counter() for row in self.data: label_freqs.update(row["""label"""] ) return label_freqs def snake_case ( lowerCamelCase ): '''simple docstring''' __lowercase = [len(row["""sentence"""] ) for row in batch] __lowercase , __lowercase = len(lowerCamelCase ), max(lowerCamelCase ) __lowercase = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long ) __lowercase = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(lowerCamelCase , lowerCamelCase ) ): __lowercase = input_row["""sentence"""] __lowercase = 1 __lowercase = torch.stack([row["""image"""] for row in batch] ) __lowercase = torch.stack([row["""label"""] for row in batch] ) __lowercase = torch.stack([row["""image_start_token"""] for row in batch] ) __lowercase = torch.stack([row["""image_end_token"""] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def snake_case ( ): '''simple docstring''' return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", 
"Biography", "Film-Noir", ] def snake_case ( ): '''simple docstring''' return transforms.Compose( [ transforms.Resize(256 ), transforms.CenterCrop(224 ), transforms.ToTensor(), transforms.Normalize( mean=[0.46777044, 0.44531429, 0.40661017] , std=[0.12221994, 0.12145835, 0.14380469] , ), ] )
80
import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class _lowercase ( A__ ): '''simple docstring''' def __magic_name__( self :List[Any] ) -> Any: __SCREAMING_SNAKE_CASE : List[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''tf_padding''' ) ) self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''depth_multiplier''' ) ) class _lowercase : '''simple docstring''' def __init__( self :List[str] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any]=13 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Optional[Any]=32 , lowerCAmelCase__ :Dict=0.25 , lowerCAmelCase__ :Optional[int]=8 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Union[str, Any]=1_024 , lowerCAmelCase__ :Any=32 , lowerCAmelCase__ :Tuple="relu6" , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Dict=0.02 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :int=True , lowerCAmelCase__ :int=10 , lowerCAmelCase__ :Union[str, Any]=None , ) -> str: __SCREAMING_SNAKE_CASE : Any = parent __SCREAMING_SNAKE_CASE : Dict = batch_size __SCREAMING_SNAKE_CASE : List[Any] = num_channels __SCREAMING_SNAKE_CASE : Union[str, Any] = image_size __SCREAMING_SNAKE_CASE : Optional[int] = depth_multiplier __SCREAMING_SNAKE_CASE : Dict = min_depth __SCREAMING_SNAKE_CASE : List[str] = tf_padding __SCREAMING_SNAKE_CASE : List[Any] = int(last_hidden_size * depth_multiplier ) __SCREAMING_SNAKE_CASE : List[str] = output_stride __SCREAMING_SNAKE_CASE : Any = hidden_act __SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_dropout_prob __SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels __SCREAMING_SNAKE_CASE : Union[str, Any] = is_training __SCREAMING_SNAKE_CASE : Optional[int] = num_labels __SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range __SCREAMING_SNAKE_CASE : Optional[int] = scope def __magic_name__( self :List[str] ) -> int: __SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = None __SCREAMING_SNAKE_CASE : Optional[Any] = None if self.use_labels: __SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.num_labels ) __SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __SCREAMING_SNAKE_CASE : Any = self.get_config() return config, pixel_values, labels, pixel_labels def __magic_name__( self :Union[str, Any] ) -> Optional[Any]: return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def __magic_name__( self :List[Any] , 
lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str] ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Dict = MobileNetVaModel(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def __magic_name__( self :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]: __SCREAMING_SNAKE_CASE : Tuple = self.num_labels __SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaForImageClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __magic_name__( self :List[Any] ) -> Tuple: __SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = config_and_inputs __SCREAMING_SNAKE_CASE : Dict = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _lowercase ( A__ , A__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else () SCREAMING_SNAKE_CASE__ : Optional[Any] = ( {'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ : Any = False SCREAMING_SNAKE_CASE__ : Any = False SCREAMING_SNAKE_CASE__ : str = False SCREAMING_SNAKE_CASE__ : Tuple = False def __magic_name__( self :Any ) -> Dict: __SCREAMING_SNAKE_CASE : List[str] = MobileNetVaModelTester(self ) __SCREAMING_SNAKE_CASE : Optional[int] = MobileNetVaConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ ) def __magic_name__( self :List[Any] ) -> int: self.config_tester.run_common_tests() @unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' ) def __magic_name__( self :Dict ) -> Optional[Any]: pass @unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' ) def __magic_name__( self :List[Any] ) -> List[Any]: pass @unittest.skip(reason='''MobileNetV1 does not output attentions''' ) def __magic_name__( self :Any ) -> Dict: pass def __magic_name__( self :Any ) -> List[Any]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE : Any = model_class(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __SCREAMING_SNAKE_CASE : Union[str, Any] = [*signature.parameters.keys()] __SCREAMING_SNAKE_CASE : List[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowerCAmelCase__ ) def __magic_name__( self :Any ) -> Tuple: __SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def __magic_name__( self :Union[str, Any] ) -> Tuple: def check_hidden_states_output(lowerCAmelCase__ :Union[str, Any] , 
lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] ): __SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): __SCREAMING_SNAKE_CASE : Optional[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) __SCREAMING_SNAKE_CASE : Optional[Any] = outputs.hidden_states __SCREAMING_SNAKE_CASE : Optional[int] = 26 self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE : str = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __SCREAMING_SNAKE_CASE : List[Any] = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def __magic_name__( self :List[Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ ) @slow def __magic_name__( self :List[str] ) -> List[Any]: for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) def _UpperCamelCase ( ): __SCREAMING_SNAKE_CASE : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class _lowercase ( unittest.TestCase ): '''simple docstring''' @cached_property def __magic_name__( self :Optional[int] ) -> Union[str, Any]: return ( MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None ) @slow def __magic_name__( self :Tuple ) -> Optional[int]: __SCREAMING_SNAKE_CASE : List[str] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = self.default_image_processor __SCREAMING_SNAKE_CASE : int = prepare_img() __SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ ) # forward pass with torch.no_grad(): __SCREAMING_SNAKE_CASE : int = model(**lowerCAmelCase__ ) # verify the logits __SCREAMING_SNAKE_CASE : Any = torch.Size((1, 1_001) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(lowerCAmelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
696
0
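The ImageEncoder in the multimodal (MM-IMDB-style) snippet above pools a ResNet feature map down to num_image_embeds positions via POOLING_BREAKDOWN, then flattens it into image "tokens". A minimal shape check of that pooling, assuming num_image_embeds == 3:

import torch
from torch import nn

pool = nn.AdaptiveAvgPool2d((3, 1))  # POOLING_BREAKDOWN[3]
feature_map = torch.randn(2, 2048, 7, 7)  # hypothetical ResNet-152 output for a batch of 2
out = torch.flatten(pool(feature_map), start_dim=2).transpose(1, 2).contiguous()
print(out.shape)  # torch.Size([2, 3, 2048]): three image embeddings per example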
import numpy as np import torch from torch.utils.data import Dataset from utils import logger class a (_lowerCAmelCase ): """simple docstring""" def __init__( self : int , lowerCamelCase : List[Any] , lowerCamelCase : str ) -> Optional[int]: __snake_case : Optional[Any] = params __snake_case : Union[str, Any] = np.array(lowerCamelCase ) __snake_case : List[Any] = np.array([len(lowerCamelCase ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self : Dict , lowerCamelCase : Optional[int] ) -> Optional[Any]: return (self.token_ids[index], self.lengths[index]) def __len__( self : Union[str, Any] ) -> Tuple: return len(self.lengths ) def __snake_case ( self : Any ) -> int: assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def __snake_case ( self : Dict ) -> Dict: __snake_case : Optional[Any] = self.params.max_model_input_size __snake_case : str = self.lengths > max_len logger.info(F'Splitting {sum(lowerCamelCase )} too long sequences.' ) def divide_chunks(lowerCamelCase : List[Any] , lowerCamelCase : Any ): return [l[i : i + n] for i in range(0 , len(lowerCamelCase ) , lowerCamelCase )] __snake_case : List[Any] = [] __snake_case : Dict = [] if self.params.mlm: __snake_case , __snake_case : List[str] = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"] else: __snake_case , __snake_case : Optional[int] = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: __snake_case : Union[str, Any] = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: __snake_case : int = np.insert(lowerCamelCase , 0 , lowerCamelCase ) if sub_s[-1] != sep_id: __snake_case : List[str] = np.insert(lowerCamelCase , len(lowerCamelCase ) , lowerCamelCase ) assert len(lowerCamelCase ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(lowerCamelCase ) new_tok_ids.extend(lowerCamelCase ) new_lengths.extend([len(lowerCamelCase ) for l in sub_seqs] ) __snake_case : Tuple = np.array(lowerCamelCase ) __snake_case : Union[str, Any] = np.array(lowerCamelCase ) def __snake_case ( self : Optional[Any] ) -> int: __snake_case : Optional[Any] = len(self ) __snake_case : List[Any] = self.lengths > 11 __snake_case : Optional[Any] = self.token_ids[indices] __snake_case : List[Any] = self.lengths[indices] __snake_case : Any = len(self ) logger.info(F'Remove {init_size - new_size} too short (<=11 tokens) sequences.' ) def __snake_case ( self : Tuple ) -> List[str]: if "unk_token" not in self.params.special_tok_ids: return else: __snake_case : Optional[int] = self.params.special_tok_ids["unk_token"] __snake_case : Tuple = len(self ) __snake_case : Dict = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) __snake_case : Dict = (unk_occs / self.lengths) < 0.5 __snake_case : Optional[int] = self.token_ids[indices] __snake_case : Optional[Any] = self.lengths[indices] __snake_case : List[str] = len(self ) logger.info(F'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).' 
) def __snake_case ( self : Optional[Any] ) -> Optional[int]: if not self.params.is_master: return logger.info(F'{len(self )} sequences' ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def __snake_case ( self : str , lowerCamelCase : Optional[Any] ) -> str: __snake_case : str = [t[0] for t in batch] __snake_case : str = [t[1] for t in batch] assert len(lowerCamelCase ) == len(lowerCamelCase ) # Max for paddings __snake_case : List[Any] = max(lowerCamelCase ) # Pad token ids if self.params.mlm: __snake_case : Tuple = self.params.special_tok_ids["pad_token"] else: __snake_case : str = self.params.special_tok_ids["unk_token"] __snake_case : int = [list(t.astype(lowerCamelCase ) ) + [pad_idx] * (max_seq_len_ - len(lowerCamelCase )) for t in token_ids] assert len(tk_ ) == len(lowerCamelCase ) assert all(len(lowerCamelCase ) == max_seq_len_ for t in tk_ ) __snake_case : Optional[int] = torch.tensor(tk_ ) # (bs, max_seq_len_) __snake_case : List[str] = torch.tensor(lowerCamelCase ) # (bs) return tk_t, lg_t
81
import os
from datetime import datetime as dt

from github import Github

LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "enhancement",
    "new pipeline/model",
    "new scheduler",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")


if __name__ == "__main__":
    main()
696
0
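The too-long-sequence splitting in the dataset class above hinges on a small chunking helper; a standalone equivalent with a quick check:

def divide_chunks(l, n):
    # Split l into consecutive pieces of at most n elements.
    return [l[i : i + n] for i in range(0, len(l), n)]

print(divide_chunks(list(range(7)), 3))  # [[0, 1, 2], [3, 4, 5], [6]]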
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase = { """configuration_trajectory_transformer""": [ """TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrajectoryTransformerConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase = [ """TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TrajectoryTransformerModel""", """TrajectoryTransformerPreTrainedModel""", """load_tf_weights_in_trajectory_transformer""", ] if TYPE_CHECKING: from .configuration_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TrajectoryTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trajectory_transformer import ( TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TrajectoryTransformerModel, TrajectoryTransformerPreTrainedModel, load_tf_weights_in_trajectory_transformer, ) else: import sys lowerCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
82
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Dict =logging.get_logger(__name__) __lowerCAmelCase : Dict ={ 'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json', # See all CANINE models at https://huggingface.co/models?filter=canine } class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = '''canine''' def __init__( self :Any , lowerCAmelCase__ :List[Any]=768 , lowerCAmelCase__ :Any=12 , lowerCAmelCase__ :str=12 , lowerCAmelCase__ :Optional[int]=3_072 , lowerCAmelCase__ :str="gelu" , lowerCAmelCase__ :Union[str, Any]=0.1 , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :int=16_384 , lowerCAmelCase__ :Tuple=16 , lowerCAmelCase__ :List[Any]=0.02 , lowerCAmelCase__ :int=1E-1_2 , lowerCAmelCase__ :int=0 , lowerCAmelCase__ :List[Any]=0xe000 , lowerCAmelCase__ :List[str]=0xe001 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :Union[str, Any]=8 , lowerCAmelCase__ :Optional[int]=16_384 , lowerCAmelCase__ :Any=128 , **lowerCAmelCase__ :Optional[Any] , ) -> Optional[Any]: super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings __SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size __SCREAMING_SNAKE_CASE : str = num_hidden_layers __SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads __SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size __SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act __SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob __SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : Dict = initializer_range __SCREAMING_SNAKE_CASE : int = type_vocab_size __SCREAMING_SNAKE_CASE : List[Any] = layer_norm_eps # Character config: __SCREAMING_SNAKE_CASE : Tuple = downsampling_rate __SCREAMING_SNAKE_CASE : Optional[Any] = upsampling_kernel_size __SCREAMING_SNAKE_CASE : Any = num_hash_functions __SCREAMING_SNAKE_CASE : Optional[int] = num_hash_buckets __SCREAMING_SNAKE_CASE : List[str] = local_transformer_stride
696
0
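The CANINE config in the snippet above exposes its character-hashing hyper-parameters directly. A hypothetical usage sketch with the defaults taken from that __init__ signature, assuming a transformers release that ships CanineConfig:

from transformers import CanineConfig

config = CanineConfig()
print(config.downsampling_rate, config.num_hash_buckets)  # 4 16384, matching the defaults above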
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase__ = {'''configuration_mbart''': ['''MBART_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MBartConfig''', '''MBartOnnxConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''MBartTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['''MBartTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''MBART_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MBartForCausalLM''', '''MBartForConditionalGeneration''', '''MBartForQuestionAnswering''', '''MBartForSequenceClassification''', '''MBartModel''', '''MBartPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''TFMBartForConditionalGeneration''', '''TFMBartModel''', '''TFMBartPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ '''FlaxMBartForConditionalGeneration''', '''FlaxMBartForQuestionAnswering''', '''FlaxMBartForSequenceClassification''', '''FlaxMBartModel''', '''FlaxMBartPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart import MBartTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart_fast import MBartTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mbart import ( MBART_PRETRAINED_MODEL_ARCHIVE_LIST, MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, MBartForSequenceClassification, MBartModel, MBartPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mbart import ( FlaxMBartForConditionalGeneration, FlaxMBartForQuestionAnswering, FlaxMBartForSequenceClassification, FlaxMBartModel, FlaxMBartPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
83
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : List[Any] =logging.get_logger(__name__) __lowerCAmelCase : Tuple ={ 'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json', } class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = '''transfo-xl''' SCREAMING_SNAKE_CASE__ : List[str] = ['''mems'''] SCREAMING_SNAKE_CASE__ : List[Any] = { '''n_token''': '''vocab_size''', '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self :str , lowerCAmelCase__ :Optional[int]=267_735 , lowerCAmelCase__ :Optional[int]=[20_000, 40_000, 200_000] , lowerCAmelCase__ :List[Any]=1_024 , lowerCAmelCase__ :List[str]=1_024 , lowerCAmelCase__ :Any=16 , lowerCAmelCase__ :Tuple=64 , lowerCAmelCase__ :Union[str, Any]=4_096 , lowerCAmelCase__ :Dict=4 , lowerCAmelCase__ :Optional[Any]=False , lowerCAmelCase__ :Dict=18 , lowerCAmelCase__ :Union[str, Any]=1_600 , lowerCAmelCase__ :Union[str, Any]=1_000 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Optional[Any]=0 , lowerCAmelCase__ :Union[str, Any]=-1 , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :str=0.0 , lowerCAmelCase__ :int=True , lowerCAmelCase__ :str="normal" , lowerCAmelCase__ :Tuple=0.01 , lowerCAmelCase__ :Union[str, Any]=0.01 , lowerCAmelCase__ :str=0.02 , lowerCAmelCase__ :Optional[Any]=1E-5 , lowerCAmelCase__ :Union[str, Any]=0 , **lowerCAmelCase__ :Optional[Any] , ) -> str: __SCREAMING_SNAKE_CASE : str = vocab_size __SCREAMING_SNAKE_CASE : Tuple = [] self.cutoffs.extend(lowerCAmelCase__ ) if proj_share_all_but_first: __SCREAMING_SNAKE_CASE : List[str] = [False] + [True] * len(self.cutoffs ) else: __SCREAMING_SNAKE_CASE : Tuple = [False] + [False] * len(self.cutoffs ) __SCREAMING_SNAKE_CASE : Union[str, Any] = d_model __SCREAMING_SNAKE_CASE : Union[str, Any] = d_embed __SCREAMING_SNAKE_CASE : Tuple = d_head __SCREAMING_SNAKE_CASE : Dict = d_inner __SCREAMING_SNAKE_CASE : Optional[Any] = div_val __SCREAMING_SNAKE_CASE : Optional[Any] = pre_lnorm __SCREAMING_SNAKE_CASE : List[str] = n_layer __SCREAMING_SNAKE_CASE : int = n_head __SCREAMING_SNAKE_CASE : str = mem_len __SCREAMING_SNAKE_CASE : Union[str, Any] = same_length __SCREAMING_SNAKE_CASE : str = attn_type __SCREAMING_SNAKE_CASE : Dict = clamp_len __SCREAMING_SNAKE_CASE : Tuple = sample_softmax __SCREAMING_SNAKE_CASE : Optional[int] = adaptive __SCREAMING_SNAKE_CASE : int = dropout __SCREAMING_SNAKE_CASE : Optional[Any] = dropatt __SCREAMING_SNAKE_CASE : int = untie_r __SCREAMING_SNAKE_CASE : Optional[int] = init __SCREAMING_SNAKE_CASE : List[str] = init_range __SCREAMING_SNAKE_CASE : Any = proj_init_std __SCREAMING_SNAKE_CASE : List[str] = init_std __SCREAMING_SNAKE_CASE : Tuple = layer_norm_epsilon super().__init__(eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) @property def __magic_name__( self :str ) -> int: # Message copied from Transformer-XL documentation logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def __magic_name__( self :Tuple , lowerCAmelCase__ :int ) -> Dict: # Message copied from Transformer-XL documentation raise NotImplementedError( f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
696
0
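The Transformer-XL config above deliberately reports no sequence-length limit and refuses to have one set. A short sketch of that property behaviour, assuming a transformers version that still ships TransfoXLConfig:

from transformers import TransfoXLConfig

config = TransfoXLConfig()
print(config.max_position_embeddings)  # -1: Transformer-XL has no fixed sequence-length limit
try:
    config.max_position_embeddings = 512
except NotImplementedError as e:
    print(e)  # the setter defined above always raises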
import argparse from collections import OrderedDict from pathlib import Path import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision.transforms import functional as F from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) UpperCAmelCase = [] for i in range(6): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append( (F"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""", F"""encoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append( (F"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", F"""encoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""encoder.layers.{i}.fc1.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""encoder.layers.{i}.fc1.bias""")) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""encoder.layers.{i}.fc2.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""encoder.layers.{i}.fc2.bias""")) rename_keys.append( (F"""transformer.encoder.layers.{i}.norm1.weight""", F"""encoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""encoder.layers.{i}.self_attn_layer_norm.bias""")) rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""encoder.layers.{i}.final_layer_norm.weight""")) rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""encoder.layers.{i}.final_layer_norm.bias""")) # decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms rename_keys.append( (F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""decoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""decoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append( ( F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""", F"""decoder.layers.{i}.encoder_attn.out_proj.weight""", ) ) rename_keys.append( ( F"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""", F"""decoder.layers.{i}.encoder_attn.out_proj.bias""", ) ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""decoder.layers.{i}.fc1.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""decoder.layers.{i}.fc1.bias""")) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""decoder.layers.{i}.fc2.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""decoder.layers.{i}.fc2.bias""")) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm1.weight""", F"""decoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""decoder.layers.{i}.self_attn_layer_norm.bias""")) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm2.weight""", F"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") ) rename_keys.append( (F"""transformer.decoder.layers.{i}.norm2.bias""", F"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") ) 
rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""decoder.layers.{i}.final_layer_norm.weight""")) rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""decoder.layers.{i}.final_layer_norm.bias""")) # convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads rename_keys.extend( [ ('''input_proj.weight''', '''input_projection.weight'''), ('''input_proj.bias''', '''input_projection.bias'''), ('''query_embed.weight''', '''query_position_embeddings.weight'''), ('''transformer.encoder.norm.weight''', '''encoder.layernorm.weight'''), ('''transformer.encoder.norm.bias''', '''encoder.layernorm.bias'''), ('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''), ('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''), ('''class_embed.weight''', '''class_labels_classifier.weight'''), ('''class_embed.bias''', '''class_labels_classifier.bias'''), ('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''), ('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''), ('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''), ('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''), ('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''), ('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''), ] ) def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): lowercase = state_dict.pop(__SCREAMING_SNAKE_CASE ) lowercase = val def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ): lowercase = OrderedDict() for key, value in state_dict.items(): if "backbone.0.body" in key: lowercase = key.replace('backbone.0.body' , 'backbone.conv_encoder.model' ) lowercase = value else: lowercase = value return new_state_dict def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ): lowercase = '' # first: transformer encoder for i in range(6 ): # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias) lowercase = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' ) lowercase = state_dict.pop(F'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict lowercase = in_proj_weight[:256, :] lowercase = in_proj_bias[:256] lowercase = in_proj_weight[256:512, :] lowercase = in_proj_bias[256:512] lowercase = in_proj_weight[-256:, :] lowercase = in_proj_bias[-256:] # next: transformer decoder (which is a bit more complex because it also includes cross-attention) for i in range(6 ): # read in weights + bias of input projection layer of self-attention lowercase = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' ) lowercase = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' ) # next, add query, keys and values (in that order) to the state dict lowercase = in_proj_weight[:256, :] lowercase = in_proj_bias[:256] lowercase = in_proj_weight[256:512, :] lowercase = in_proj_bias[256:512] lowercase = in_proj_weight[-256:, :] lowercase = in_proj_bias[-256:] # read in weights + bias of input projection layer of cross-attention lowercase = state_dict.pop( F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' ) lowercase = state_dict.pop(F'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' ) # next, add query, keys and values (in 
that order) of cross-attention to the state dict lowercase = in_proj_weight_cross_attn[:256, :] lowercase = in_proj_bias_cross_attn[:256] lowercase = in_proj_weight_cross_attn[256:512, :] lowercase = in_proj_bias_cross_attn[256:512] lowercase = in_proj_weight_cross_attn[-256:, :] lowercase = in_proj_bias_cross_attn[-256:] def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): lowercase , lowercase = image.size lowercase = max(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase = 800 if 'detection' in checkpoint_url else 1000 lowercase = target_max_size / current_max_size lowercase = image.resize((int(round(scale * width ) ), int(round(scale * height ) )) ) return resized_image def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ): lowercase = F.to_tensor(__SCREAMING_SNAKE_CASE ) lowercase = F.normalize(__SCREAMING_SNAKE_CASE , mean=[0.4_85, 0.4_56, 0.4_06] , std=[0.2_29, 0.2_24, 0.2_25] ) return image @torch.no_grad() def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): logger.info('Converting model...' ) # load original state dict lowercase = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location='cpu' ) # rename keys for src, dest in rename_keys: rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase = rename_backbone_keys(__SCREAMING_SNAKE_CASE ) # query, key and value matrices need special treatment read_in_q_k_v(__SCREAMING_SNAKE_CASE ) # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them lowercase = 'model.' for key in state_dict.copy().keys(): if not key.startswith('class_labels_classifier' ) and not key.startswith('bbox_predictor' ): lowercase = state_dict.pop(__SCREAMING_SNAKE_CASE ) lowercase = val # create HuggingFace model and load state dict lowercase = TableTransformerConfig( backbone='resnet18' , mask_loss_coefficient=1 , dice_loss_coefficient=1 , ce_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.4 , class_cost=1 , bbox_cost=5 , giou_cost=2 , ) if "detection" in checkpoint_url: lowercase = 15 lowercase = 2 lowercase = {0: 'table', 1: 'table rotated'} lowercase = idalabel lowercase = {v: k for k, v in idalabel.items()} else: lowercase = 125 lowercase = 6 lowercase = { 0: 'table', 1: 'table column', 2: 'table row', 3: 'table column header', 4: 'table projected row header', 5: 'table spanning cell', } lowercase = idalabel lowercase = {v: k for k, v in idalabel.items()} lowercase = DetrImageProcessor( format='coco_detection' , max_size=800 if 'detection' in checkpoint_url else 1000 ) lowercase = TableTransformerForObjectDetection(__SCREAMING_SNAKE_CASE ) model.load_state_dict(__SCREAMING_SNAKE_CASE ) model.eval() # verify our conversion lowercase = 'example_pdf.png' if 'detection' in checkpoint_url else 'example_table.png' lowercase = hf_hub_download(repo_id='nielsr/example-pdf' , repo_type='dataset' , filename=__SCREAMING_SNAKE_CASE ) lowercase = Image.open(__SCREAMING_SNAKE_CASE ).convert('RGB' ) lowercase = normalize(resize(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ).unsqueeze(0 ) lowercase = model(__SCREAMING_SNAKE_CASE ) if "detection" in checkpoint_url: lowercase = (1, 15, 3) lowercase = torch.tensor( [[-6.78_97, -16.99_85, 6.79_37], [-8.01_86, -22.21_92, 6.96_77], [-7.31_17, -21.07_08, 7.40_55]] ) lowercase = torch.tensor([[0.48_67, 0.17_67, 0.67_32], [0.67_18, 0.44_79, 0.38_30], [0.47_16, 0.17_60, 0.63_64]] ) else: lowercase = 
(1, 125, 7) lowercase = torch.tensor( [[-18.14_30, -8.32_14, 4.82_74], [-18.46_85, -7.13_61, -4.26_67], [-26.36_93, -9.34_29, -4.99_62]] ) lowercase = torch.tensor([[0.49_83, 0.55_95, 0.94_40], [0.49_16, 0.63_15, 0.59_54], [0.61_08, 0.86_37, 0.11_35]] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: # Save model and image processor logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' ) Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if push_to_hub: # Push model to HF hub logger.info('Pushing model to the hub...' ) lowercase = ( 'microsoft/table-transformer-detection' if 'detection' in checkpoint_url else 'microsoft/table-transformer-structure-recognition' ) model.push_to_hub(__SCREAMING_SNAKE_CASE ) image_processor.push_to_hub(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": UpperCAmelCase = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_url''', default='''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''', type=str, choices=[ '''https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth''', '''https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth''', ], help='''URL of the Table Transformer checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) UpperCAmelCase = parser.parse_args() convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
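A minimal sketch of the fused-attention split that read_in_q_k_v performs in the script above; d = 256 is taken from the script, and the tensor here is random, purely for illustration:

# PyTorch's nn.MultiheadAttention keeps one fused (3*d, d) in_proj matrix,
# which the conversion slices into separate query/key/value weights.
import torch

d = 256
in_proj_weight = torch.randn(3 * d, d)
q_w = in_proj_weight[:d, :]
k_w = in_proj_weight[d : 2 * d, :]
v_w = in_proj_weight[-d:, :]
assert q_w.shape == k_w.shape == v_w.shape == (d, d)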
84
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : str =logging.get_logger(__name__) __lowerCAmelCase : Any ={ # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = '''megatron-bert''' def __init__( self :int , lowerCAmelCase__ :int=29_056 , lowerCAmelCase__ :Dict=1_024 , lowerCAmelCase__ :Optional[int]=24 , lowerCAmelCase__ :str=16 , lowerCAmelCase__ :Optional[int]=4_096 , lowerCAmelCase__ :Optional[Any]="gelu" , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :List[str]=512 , lowerCAmelCase__ :Any=2 , lowerCAmelCase__ :int=0.02 , lowerCAmelCase__ :Tuple=1E-1_2 , lowerCAmelCase__ :Tuple=0 , lowerCAmelCase__ :Optional[int]="absolute" , lowerCAmelCase__ :List[str]=True , **lowerCAmelCase__ :Tuple , ) -> Optional[Any]: super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = vocab_size __SCREAMING_SNAKE_CASE : List[str] = hidden_size __SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers __SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads __SCREAMING_SNAKE_CASE : Tuple = hidden_act __SCREAMING_SNAKE_CASE : Any = intermediate_size __SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob __SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings __SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size __SCREAMING_SNAKE_CASE : str = initializer_range __SCREAMING_SNAKE_CASE : Dict = layer_norm_eps __SCREAMING_SNAKE_CASE : Dict = position_embedding_type __SCREAMING_SNAKE_CASE : Union[str, Any] = use_cache
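A hedged usage sketch for a configuration class of this shape, assuming it corresponds to transformers' MegatronBertConfig (the _lowercase name is an artifact of the dataset's renaming):

from transformers import MegatronBertConfig

config = MegatronBertConfig(hidden_size=512, num_attention_heads=8)
config.save_pretrained("./megatron-bert-config")  # writes config.json
reloaded = MegatronBertConfig.from_pretrained("./megatron-bert-config")
assert reloaded.hidden_size == 512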
696
0
import functools
from typing import Any


def word_break(string: str, words: list[str]) -> bool:
    # Validation: the obfuscated original reused one parameter name for both
    # arguments (a SyntaxError); meaningful names restore the intent.
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError("the string should be not empty string")
    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError("the words should be a list of non-empty strings")

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = "WORD_KEEPER"
    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}
            trie_node = trie_node[c]
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)
            if trie_node is None:
                return False
            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True
        return False

    return is_breakable(0)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
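A hedged usage check for the word-break routine above; the inputs are illustrative:

assert word_break("applepenapple", ["apple", "pen"]) is True
assert word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]) is False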
85
import os import sys import unittest __lowerCAmelCase : List[Any] =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __lowerCAmelCase : Optional[Any] =os.path.join(git_repo_path, 'src', 'transformers') __lowerCAmelCase : Optional[Any] ='\n{0} = None\n' __lowerCAmelCase : Tuple ='\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n' __lowerCAmelCase : Dict ='\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n' class _lowercase ( unittest.TestCase ): '''simple docstring''' def __magic_name__( self :Tuple ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE : str = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''' ) self.assertIsNone(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Dict = find_backend(''' if not is_tokenizers_available():''' ) self.assertEqual(lowerCAmelCase__ , '''tokenizers''' ) __SCREAMING_SNAKE_CASE : Dict = find_backend(''' if not is_tensorflow_text_available():''' ) self.assertEqual(lowerCAmelCase__ , '''tensorflow_text''' ) __SCREAMING_SNAKE_CASE : Tuple = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' ) self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tokenizers''' ) __SCREAMING_SNAKE_CASE : Any = find_backend( ''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' ) self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tensorflow_text''' ) __SCREAMING_SNAKE_CASE : List[str] = find_backend( ''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' ) self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tokenizers_and_vision''' ) def __magic_name__( self :List[str] ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Union[str, Any] = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('''torch''' , lowerCAmelCase__ ) self.assertIn('''tensorflow_text''' , lowerCAmelCase__ ) self.assertIn('''sentencepiece_and_tokenizers''' , lowerCAmelCase__ ) # Likewise, we can't assert on the exact content of a key self.assertIn('''BertModel''' , objects['''torch'''] ) self.assertIn('''TFBertModel''' , objects['''tf'''] ) self.assertIn('''FlaxBertModel''' , objects['''flax'''] ) self.assertIn('''BertModel''' , objects['''torch'''] ) self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] ) self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] ) def __magic_name__( self :Optional[Any] ) -> List[Any]: __SCREAMING_SNAKE_CASE : List[Any] = create_dummy_object('''CONSTANT''' , '''\'torch\'''' ) self.assertEqual(lowerCAmelCase__ , '''\nCONSTANT = None\n''' ) __SCREAMING_SNAKE_CASE : List[str] = create_dummy_object('''function''' , '''\'torch\'''' ) self.assertEqual( lowerCAmelCase__ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' ) __SCREAMING_SNAKE_CASE : int = ''' class FakeClass(metaclass=DummyObject): _backends = \'torch\' def __init__(self, *args, **kwargs): requires_backends(self, \'torch\') ''' __SCREAMING_SNAKE_CASE : Optional[Any] = create_dummy_object('''FakeClass''' , '''\'torch\'''' ) self.assertEqual(lowerCAmelCase__ , 
lowerCAmelCase__ ) def __magic_name__( self :Optional[Any] ) -> Any: __SCREAMING_SNAKE_CASE : str = '''# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, ["torch"]) class FakeClass(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ''' __SCREAMING_SNAKE_CASE : List[Any] = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} ) self.assertEqual(dummy_files['''torch'''] , lowerCAmelCase__ )
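A condensed, standalone sketch of the dummy-object pattern these templates generate; requires_backends here is a simplified stand-in for the transformers utility, not its real implementation:

def requires_backends(obj, backends):
    # Simplified stand-in: always report the backend as missing.
    name = getattr(obj, "__name__", obj.__class__.__name__)
    raise ImportError(f"{name} requires the {backends} backend(s).")


class DummyObject(type):
    # Any non-underscore attribute access on the class raises, mirroring the
    # metaclass used by the generated dummy files.
    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        requires_backends(cls, cls._backends)


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)


try:
    FakeClass()
except ImportError as err:
    print(err)  # FakeClass requires the ['torch'] backend(s).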
696
0
import argparse import io import requests import torch from omegaconf import OmegaConf from diffusers import AutoencoderKL from diffusers.pipelines.stable_diffusion.convert_from_ckpt import ( assign_to_checkpoint, conv_attn_to_linear, create_vae_diffusers_config, renew_vae_attention_paths, renew_vae_resnet_paths, ) def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : List[str] ): """simple docstring""" A_ = checkpoint A_ = {} A_ = vae_state_dict["encoder.conv_in.weight"] A_ = vae_state_dict["encoder.conv_in.bias"] A_ = vae_state_dict["encoder.conv_out.weight"] A_ = vae_state_dict["encoder.conv_out.bias"] A_ = vae_state_dict["encoder.norm_out.weight"] A_ = vae_state_dict["encoder.norm_out.bias"] A_ = vae_state_dict["decoder.conv_in.weight"] A_ = vae_state_dict["decoder.conv_in.bias"] A_ = vae_state_dict["decoder.conv_out.weight"] A_ = vae_state_dict["decoder.conv_out.bias"] A_ = vae_state_dict["decoder.norm_out.weight"] A_ = vae_state_dict["decoder.norm_out.bias"] A_ = vae_state_dict["quant_conv.weight"] A_ = vae_state_dict["quant_conv.bias"] A_ = vae_state_dict["post_quant_conv.weight"] A_ = vae_state_dict["post_quant_conv.bias"] # Retrieves the keys for the encoder down blocks only A_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} ) A_ = { layer_id: [key for key in vae_state_dict if f'''down.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } # Retrieves the keys for the decoder up blocks only A_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} ) A_ = { layer_id: [key for key in vae_state_dict if f'''up.{layer_id}''' in key] for layer_id in range(__UpperCamelCase ) } for i in range(__UpperCamelCase ): A_ = [key for key in down_blocks[i] if f'''down.{i}''' in key and f'''down.{i}.downsample''' not in key] if f'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict: A_ = vae_state_dict.pop( f'''encoder.down.{i}.downsample.conv.weight''' ) A_ = vae_state_dict.pop( f'''encoder.down.{i}.downsample.conv.bias''' ) A_ = renew_vae_resnet_paths(__UpperCamelCase ) A_ = {"old": f'''down.{i}.block''', "new": f'''down_blocks.{i}.resnets'''} assign_to_checkpoint(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,additional_replacements=[meta_path] ,config=__UpperCamelCase ) A_ = [key for key in vae_state_dict if "encoder.mid.block" in key] A_ = 2 for i in range(1 ,num_mid_res_blocks + 1 ): A_ = [key for key in mid_resnets if f'''encoder.mid.block_{i}''' in key] A_ = renew_vae_resnet_paths(__UpperCamelCase ) A_ = {"old": f'''mid.block_{i}''', "new": f'''mid_block.resnets.{i - 1}'''} assign_to_checkpoint(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,additional_replacements=[meta_path] ,config=__UpperCamelCase ) A_ = [key for key in vae_state_dict if "encoder.mid.attn" in key] A_ = renew_vae_attention_paths(__UpperCamelCase ) A_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,additional_replacements=[meta_path] ,config=__UpperCamelCase ) conv_attn_to_linear(__UpperCamelCase ) for i in range(__UpperCamelCase ): A_ = num_up_blocks - 1 - i A_ = [ key for key in up_blocks[block_id] if f'''up.{block_id}''' in key and f'''up.{block_id}.upsample''' not in key ] if f'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict: A_ = vae_state_dict[ f'''decoder.up.{block_id}.upsample.conv.weight''' ] A_ = vae_state_dict[ f'''decoder.up.{block_id}.upsample.conv.bias''' ] A_ = 
renew_vae_resnet_paths(__UpperCamelCase ) A_ = {"old": f'''up.{block_id}.block''', "new": f'''up_blocks.{i}.resnets'''} assign_to_checkpoint(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,additional_replacements=[meta_path] ,config=__UpperCamelCase ) A_ = [key for key in vae_state_dict if "decoder.mid.block" in key] A_ = 2 for i in range(1 ,num_mid_res_blocks + 1 ): A_ = [key for key in mid_resnets if f'''decoder.mid.block_{i}''' in key] A_ = renew_vae_resnet_paths(__UpperCamelCase ) A_ = {"old": f'''mid.block_{i}''', "new": f'''mid_block.resnets.{i - 1}'''} assign_to_checkpoint(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,additional_replacements=[meta_path] ,config=__UpperCamelCase ) A_ = [key for key in vae_state_dict if "decoder.mid.attn" in key] A_ = renew_vae_attention_paths(__UpperCamelCase ) A_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"} assign_to_checkpoint(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,additional_replacements=[meta_path] ,config=__UpperCamelCase ) conv_attn_to_linear(__UpperCamelCase ) return new_checkpoint def __snake_case ( __UpperCamelCase : str ,__UpperCamelCase : str ,): """simple docstring""" A_ = requests.get( " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" ) A_ = io.BytesIO(r.content ) A_ = OmegaConf.load(__UpperCamelCase ) A_ = 512 A_ = "cuda" if torch.cuda.is_available() else "cpu" if checkpoint_path.endswith("safetensors" ): from safetensors import safe_open A_ = {} with safe_open(__UpperCamelCase ,framework="pt" ,device="cpu" ) as f: for key in f.keys(): A_ = f.get_tensor(__UpperCamelCase ) else: A_ = torch.load(__UpperCamelCase ,map_location=__UpperCamelCase )["state_dict"] # Convert the VAE model. A_ = create_vae_diffusers_config(__UpperCamelCase ,image_size=__UpperCamelCase ) A_ = custom_convert_ldm_vae_checkpoint(__UpperCamelCase ,__UpperCamelCase ) A_ = AutoencoderKL(**__UpperCamelCase ) vae.load_state_dict(__UpperCamelCase ) vae.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": __a :str = argparse.ArgumentParser() parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.') parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.') __a :List[str] = parser.parse_args() vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
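A minimal sketch of the old/new meta_path substitution that assign_to_checkpoint applies above; the state-dict keys here are invented for illustration:

def apply_meta_path(state_dict, meta_path):
    # Rewrite the "old" substring to "new" in every matching key.
    return {k.replace(meta_path["old"], meta_path["new"]): v for k, v in state_dict.items()}


sd = {"up.3.block.0.conv1.weight": 0, "up.3.block.0.conv1.bias": 1}
print(apply_meta_path(sd, {"old": "up.3.block", "new": "up_blocks.0.resnets"}))
# {'up_blocks.0.resnets.0.conv1.weight': 0, 'up_blocks.0.resnets.0.conv1.bias': 1}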
86
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    # The obfuscated original gave both functions the same name and passed the
    # input number where the integrand function belongs; distinct names fix it.
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
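A worked check for the fixed gamma above; for positive integers the gamma function satisfies gamma(n) = (n-1)!:

assert abs(gamma(3) - 2.0) < 1e-6   # gamma(3) = 2! = 2
assert abs(gamma(5) - 24.0) < 1e-6  # gamma(5) = 4! = 24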
696
0
import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils""")) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 _lowerCamelCase : Optional[Any] = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""") _lowerCamelCase : Union[str, Any] = get_tests_dir("""fixtures/vocab.json""") _lowerCamelCase : List[Any] = get_tests_dir("""fixtures""") class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->List[str]: '''simple docstring''' A__ = 0 def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->int: '''simple docstring''' A__ = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''') self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Any: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: A__ = WavaVecaConfig() A__ = AutoProcessor.from_pretrained('''facebook/wav2vec2-base-960h''') # save in new folder model_config.save_pretrained(UpperCAmelCase__) processor.save_pretrained(UpperCAmelCase__) A__ = AutoProcessor.from_pretrained(UpperCAmelCase__) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : str) ->Any: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , UpperCAmelCase__)) copyfile(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , '''vocab.json''')) A__ = AutoProcessor.from_pretrained(UpperCAmelCase__) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: A__ = WavaVecaFeatureExtractor() A__ = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''') A__ = WavaVecaProcessor(UpperCAmelCase__ , UpperCAmelCase__) # save in new folder processor.save_pretrained(UpperCAmelCase__) # drop `processor_class` in tokenizer with open(os.path.join(UpperCAmelCase__ , UpperCAmelCase__) , '''r''') as f: A__ = json.load(UpperCAmelCase__) config_dict.pop('''processor_class''') with open(os.path.join(UpperCAmelCase__ , UpperCAmelCase__) , '''w''') as f: f.write(json.dumps(UpperCAmelCase__)) A__ = AutoProcessor.from_pretrained(UpperCAmelCase__) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : str) ->Optional[int]: '''simple docstring''' 
with tempfile.TemporaryDirectory() as tmpdirname: A__ = WavaVecaFeatureExtractor() A__ = AutoTokenizer.from_pretrained('''facebook/wav2vec2-base-960h''') A__ = WavaVecaProcessor(UpperCAmelCase__ , UpperCAmelCase__) # save in new folder processor.save_pretrained(UpperCAmelCase__) # drop `processor_class` in feature extractor with open(os.path.join(UpperCAmelCase__ , UpperCAmelCase__) , '''r''') as f: A__ = json.load(UpperCAmelCase__) config_dict.pop('''processor_class''') with open(os.path.join(UpperCAmelCase__ , UpperCAmelCase__) , '''w''') as f: f.write(json.dumps(UpperCAmelCase__)) A__ = AutoProcessor.from_pretrained(UpperCAmelCase__) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : str) ->Optional[int]: '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: A__ = WavaVecaConfig(processor_class='''Wav2Vec2Processor''') model_config.save_pretrained(UpperCAmelCase__) # copy relevant files copyfile(UpperCAmelCase__ , os.path.join(UpperCAmelCase__ , '''vocab.json''')) # create emtpy sample processor with open(os.path.join(UpperCAmelCase__ , UpperCAmelCase__) , '''w''') as f: f.write('''{}''') A__ = AutoProcessor.from_pretrained(UpperCAmelCase__) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__) def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: '''simple docstring''' with self.assertRaises(UpperCAmelCase__): A__ = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''') # If remote code is disabled, we can't load this config. with self.assertRaises(UpperCAmelCase__): A__ = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCAmelCase__) A__ = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCAmelCase__) self.assertTrue(processor.special_attribute_present) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''') A__ = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present) self.assertEqual(feature_extractor.__class__.__name__ , '''NewFeatureExtractor''') A__ = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizerFast''') # Test we can also load the slow version A__ = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCAmelCase__ , use_fast=UpperCAmelCase__) A__ = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present) self.assertEqual(new_tokenizer.__class__.__name__ , '''NewTokenizer''') else: self.assertEqual(tokenizer.__class__.__name__ , '''NewTokenizer''') def SCREAMING_SNAKE_CASE ( self : Optional[Any]) ->Union[str, Any]: '''simple docstring''' try: AutoConfig.register('''custom''' , UpperCAmelCase__) AutoFeatureExtractor.register(UpperCAmelCase__ , UpperCAmelCase__) AutoTokenizer.register(UpperCAmelCase__ , slow_tokenizer_class=UpperCAmelCase__) AutoProcessor.register(UpperCAmelCase__ , UpperCAmelCase__) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(UpperCAmelCase__): AutoProcessor.register(UpperCAmelCase__ , UpperCAmelCase__) # Now that the config is registered, it can be used as any other config with the auto-API A__ = CustomFeatureExtractor.from_pretrained(UpperCAmelCase__) with tempfile.TemporaryDirectory() as tmp_dir: A__ = os.path.join(UpperCAmelCase__ , 
'''vocab.txt''') with open(UpperCAmelCase__ , '''w''' , encoding='''utf-8''') as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens])) A__ = CustomTokenizer(UpperCAmelCase__) A__ = CustomProcessor(UpperCAmelCase__ , UpperCAmelCase__) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(UpperCAmelCase__) A__ = AutoProcessor.from_pretrained(UpperCAmelCase__) self.assertIsInstance(UpperCAmelCase__ , UpperCAmelCase__) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Tuple: '''simple docstring''' class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = False class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = False class UpperCamelCase_ ( UpperCAmelCase__ ): '''simple docstring''' UpperCAmelCase__ = '''AutoFeatureExtractor''' UpperCAmelCase__ = '''AutoTokenizer''' UpperCAmelCase__ = False try: AutoConfig.register('''custom''' , UpperCAmelCase__) AutoFeatureExtractor.register(UpperCAmelCase__ , UpperCAmelCase__) AutoTokenizer.register(UpperCAmelCase__ , slow_tokenizer_class=UpperCAmelCase__) AutoProcessor.register(UpperCAmelCase__ , UpperCAmelCase__) # If remote code is not set, the default is to use local classes. A__ = AutoProcessor.from_pretrained('''hf-internal-testing/test_dynamic_processor''') self.assertEqual(processor.__class__.__name__ , '''NewProcessor''') self.assertFalse(processor.special_attribute_present) self.assertFalse(processor.feature_extractor.special_attribute_present) self.assertFalse(processor.tokenizer.special_attribute_present) # If remote code is disabled, we load the local ones. A__ = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCAmelCase__) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''') self.assertFalse(processor.special_attribute_present) self.assertFalse(processor.feature_extractor.special_attribute_present) self.assertFalse(processor.tokenizer.special_attribute_present) # If remote is enabled, we load from the Hub. 
A__ = AutoProcessor.from_pretrained( '''hf-internal-testing/test_dynamic_processor''' , trust_remote_code=UpperCAmelCase__) self.assertEqual(processor.__class__.__name__ , '''NewProcessor''') self.assertTrue(processor.special_attribute_present) self.assertTrue(processor.feature_extractor.special_attribute_present) self.assertTrue(processor.tokenizer.special_attribute_present) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def SCREAMING_SNAKE_CASE ( self : Optional[int]) ->Dict: '''simple docstring''' A__ = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-bert''') self.assertEqual(processor.__class__.__name__ , '''BertTokenizerFast''') def SCREAMING_SNAKE_CASE ( self : Any) ->Optional[Any]: '''simple docstring''' A__ = AutoProcessor.from_pretrained('''hf-internal-testing/tiny-random-convnext''') self.assertEqual(processor.__class__.__name__ , '''ConvNextImageProcessor''') @is_staging_test class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''bla''', '''blou'''] @classmethod def SCREAMING_SNAKE_CASE ( cls : Any) ->List[Any]: '''simple docstring''' A__ = TOKEN HfFolder.save_token(UpperCAmelCase__) @classmethod def SCREAMING_SNAKE_CASE ( cls : Union[str, Any]) ->Any: '''simple docstring''' try: delete_repo(token=cls._token , repo_id='''test-processor''') except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-processor-org''') except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-processor''') except HTTPError: pass def SCREAMING_SNAKE_CASE ( self : List[str]) ->Any: '''simple docstring''' A__ = WavaVecaProcessor.from_pretrained(UpperCAmelCase__) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(UpperCAmelCase__ , '''test-processor''') , push_to_hub=UpperCAmelCase__ , use_auth_token=self._token) A__ = WavaVecaProcessor.from_pretrained(f"""{USER}/test-processor""") for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(UpperCAmelCase__ , getattr(new_processor.feature_extractor , UpperCAmelCase__)) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab()) def SCREAMING_SNAKE_CASE ( self : List[Any]) ->str: '''simple docstring''' A__ = WavaVecaProcessor.from_pretrained(UpperCAmelCase__) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(UpperCAmelCase__ , '''test-processor-org''') , push_to_hub=UpperCAmelCase__ , use_auth_token=self._token , organization='''valid_org''' , ) A__ = WavaVecaProcessor.from_pretrained('''valid_org/test-processor-org''') for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(UpperCAmelCase__ , getattr(new_processor.feature_extractor , UpperCAmelCase__)) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab()) def SCREAMING_SNAKE_CASE ( self : Union[str, Any]) ->Union[str, Any]: '''simple docstring''' CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() A__ = 
CustomFeatureExtractor.from_pretrained(UpperCAmelCase__) with tempfile.TemporaryDirectory() as tmp_dir: A__ = os.path.join(UpperCAmelCase__ , '''vocab.txt''') with open(UpperCAmelCase__ , '''w''' , encoding='''utf-8''') as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in self.vocab_tokens])) A__ = CustomTokenizer(UpperCAmelCase__) A__ = CustomProcessor(UpperCAmelCase__ , UpperCAmelCase__) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(f"""{USER}/test-dynamic-processor""" , token=self._token) A__ = Repository(UpperCAmelCase__ , clone_from=f"""{USER}/test-dynamic-processor""" , token=self._token) processor.save_pretrained(UpperCAmelCase__) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map , { '''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor''', '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(UpperCAmelCase__ , '''tokenizer_config.json''')) as f: A__ = json.load(UpperCAmelCase__) self.assertDictEqual( tokenizer_config['''auto_map'''] , { '''AutoTokenizer''': ['''custom_tokenization.CustomTokenizer''', None], '''AutoProcessor''': '''custom_processing.CustomProcessor''', } , ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase__ , '''custom_feature_extraction.py'''))) self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase__ , '''custom_tokenization.py'''))) self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase__ , '''custom_processing.py'''))) repo.push_to_hub() A__ = AutoProcessor.from_pretrained(f"""{USER}/test-dynamic-processor""" , trust_remote_code=UpperCAmelCase__) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ , '''CustomProcessor''')
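A condensed sketch of the registration flow these tests exercise; MyConfig and MyProcessor are illustrative names, not real transformers classes:

from transformers import AutoConfig, AutoProcessor
from transformers.configuration_utils import PretrainedConfig
from transformers.processing_utils import ProcessorMixin


class MyConfig(PretrainedConfig):
    model_type = "my-model"


class MyProcessor(ProcessorMixin):
    feature_extractor_class = "AutoFeatureExtractor"
    tokenizer_class = "AutoTokenizer"


AutoConfig.register("my-model", MyConfig)
AutoProcessor.register(MyConfig, MyProcessor)
# After this, AutoProcessor.from_pretrained(...) can resolve MyProcessor for
# checkpoints whose config declares model_type == "my-model".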
87
def equated_monthly_installments(
    principal: float, rate_per_annum: float, years_to_repay: int
) -> float:
    # The obfuscated original renamed all three parameters to the same
    # identifier while the body kept the descriptive names; restored here.
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
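A worked example for the function above: 25,000 borrowed at 8% per annum over 10 years (120 monthly payments) comes to roughly 303.32 per month:

print(round(equated_monthly_installments(25_000, 0.08, 10), 2))  # ~303.32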
696
0
"""simple docstring""" import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = False, False, False @dataclass class lowercase__ : __UpperCAmelCase = None __UpperCAmelCase = True __UpperCAmelCase = True __UpperCAmelCase = None # Automatically constructed __UpperCAmelCase = "dict" __UpperCAmelCase = pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} ) __UpperCAmelCase = field(default='''Audio''' ,init=A_ ,repr=A_ ) def __call__( self) -> Optional[int]: return self.pa_type def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> dict: try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError("""To support encoding audio data, please install 'soundfile'.""") from err if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE): return {"bytes": None, "path": value} elif isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes _lowerCamelCase : str = BytesIO() sf.write(SCREAMING_SNAKE_CASE , value["""array"""] , value["""sampling_rate"""] , format="""wav""") return {"bytes": buffer.getvalue(), "path": None} elif value.get("""path""") is not None and os.path.isfile(value["""path"""]): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith("""pcm"""): # "PCM" only has raw audio bytes if value.get("""sampling_rate""") is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError("""To use PCM files, please specify a 'sampling_rate' in Audio object""") if value.get("""bytes"""): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) _lowerCamelCase : Dict = np.frombuffer(value["""bytes"""] , dtype=np.intaa).astype(np.floataa) / 3_2767 else: _lowerCamelCase : Optional[Any] = np.memmap(value["""path"""] , dtype="""h""" , mode="""r""").astype(np.floataa) / 3_2767 _lowerCamelCase : List[Any] = BytesIO(bytes()) sf.write(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , value["""sampling_rate"""] , format="""wav""") return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get("""path""")} elif value.get("""bytes""") is not None or value.get("""path""") is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get("""bytes"""), "path": value.get("""path""")} else: raise ValueError( F'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.') def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE = None) -> dict: if not self.decode: raise RuntimeError("""Decoding is disabled for this feature. 
Please use Audio(decode=True) instead.""") _lowerCamelCase , _lowerCamelCase : Optional[Any] = (value["""path"""], BytesIO(value["""bytes"""])) if value["""bytes"""] is not None else (value["""path"""], None) if path is None and file is None: raise ValueError(F'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.') try: import librosa import soundfile as sf except ImportError as err: raise ImportError("""To support decoding audio files, please install 'librosa' and 'soundfile'.""") from err _lowerCamelCase : List[Any] = xsplitext(SCREAMING_SNAKE_CASE)[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( """Decoding 'opus' files requires system library 'libsndfile'>=1.0.31, """ """You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( """Decoding 'mp3' files requires system library 'libsndfile'>=1.1.0, """ """You can try to update `soundfile` python library: `pip install \"soundfile>=0.12.1\"`. """) if file is None: _lowerCamelCase : List[Any] = token_per_repo_id or {} _lowerCamelCase : Dict = path.split("""::""")[-1] try: _lowerCamelCase : str = string_to_dict(SCREAMING_SNAKE_CASE , config.HUB_DATASETS_URL)["""repo_id"""] _lowerCamelCase : Tuple = token_per_repo_id[repo_id] except (ValueError, KeyError): _lowerCamelCase : Tuple = None with xopen(SCREAMING_SNAKE_CASE , """rb""" , use_auth_token=SCREAMING_SNAKE_CASE) as f: _lowerCamelCase , _lowerCamelCase : Dict = sf.read(SCREAMING_SNAKE_CASE) else: _lowerCamelCase , _lowerCamelCase : Tuple = sf.read(SCREAMING_SNAKE_CASE) _lowerCamelCase : str = array.T if self.mono: _lowerCamelCase : Optional[int] = librosa.to_mono(SCREAMING_SNAKE_CASE) if self.sampling_rate and self.sampling_rate != sampling_rate: _lowerCamelCase : Optional[Any] = librosa.resample(SCREAMING_SNAKE_CASE , orig_sr=SCREAMING_SNAKE_CASE , target_sr=self.sampling_rate) _lowerCamelCase : Dict = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def UpperCamelCase_ ( self) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Value if self.decode: raise ValueError("""Cannot flatten a decoded Audio feature.""") return { "bytes": Value("""binary"""), "path": Value("""string"""), } def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> pa.StructArray: if pa.types.is_string(storage.type): _lowerCamelCase : Dict = pa.array([None] * len(SCREAMING_SNAKE_CASE) , type=pa.binary()) _lowerCamelCase : Optional[Any] = pa.StructArray.from_arrays([bytes_array, storage] , ["""bytes""", """path"""] , mask=storage.is_null()) elif pa.types.is_binary(storage.type): _lowerCamelCase : List[Any] = pa.array([None] * len(SCREAMING_SNAKE_CASE) , type=pa.string()) _lowerCamelCase : List[Any] = pa.StructArray.from_arrays([storage, path_array] , ["""bytes""", """path"""] , mask=storage.is_null()) elif pa.types.is_struct(storage.type) and storage.type.get_all_field_indices("""array"""): _lowerCamelCase : List[Any] = pa.array([Audio().encode_example(SCREAMING_SNAKE_CASE) if x is not None else None for x in storage.to_pylist()]) elif pa.types.is_struct(storage.type): if storage.type.get_field_index("""bytes""") >= 0: _lowerCamelCase : Any = storage.field("""bytes""") else: _lowerCamelCase : List[Any] = pa.array([None] * len(SCREAMING_SNAKE_CASE) , type=pa.binary()) if storage.type.get_field_index("""path""") >= 0: _lowerCamelCase : 
List[Any] = storage.field("""path""") else: _lowerCamelCase : List[str] = pa.array([None] * len(SCREAMING_SNAKE_CASE) , type=pa.string()) _lowerCamelCase : Tuple = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=storage.is_null()) return array_cast(SCREAMING_SNAKE_CASE , self.pa_type) def UpperCamelCase_ ( self , SCREAMING_SNAKE_CASE) -> pa.StructArray: @no_op_if_value_is_null def path_to_bytes(SCREAMING_SNAKE_CASE): with xopen(SCREAMING_SNAKE_CASE , """rb""") as f: _lowerCamelCase : int = f.read() return bytes_ _lowerCamelCase : Any = pa.array( [ (path_to_bytes(x["""path"""]) if x["""bytes"""] is None else x["""bytes"""]) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) _lowerCamelCase : Tuple = pa.array( [os.path.basename(SCREAMING_SNAKE_CASE) if path is not None else None for path in storage.field("""path""").to_pylist()] , type=pa.string() , ) _lowerCamelCase : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ["""bytes""", """path"""] , mask=bytes_array.is_null()) return array_cast(SCREAMING_SNAKE_CASE , self.pa_type)
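A hedged round-trip sketch for this feature, assuming the datasets library's Audio class (which the code above mirrors) and that soundfile/librosa are installed:

import numpy as np
from datasets import Audio

audio = Audio(sampling_rate=16_000)
encoded = audio.encode_example(
    {"array": np.zeros(16_000, dtype=np.float32), "sampling_rate": 16_000}
)  # -> {"bytes": b"RIFF...", "path": None}
decoded = audio.decode_example(encoded)
print(decoded["sampling_rate"], decoded["array"].shape)  # 16000 (16000,)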
88
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(n: int) -> int:
    # The obfuscated original passed the input number, not the factor list,
    # to is_square_free and len(); the factor list is what both expect.
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
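Quick checks for the fixed Möbius function, assuming the maths package from the same repository is importable:

assert mobius(4) == 0    # 4 = 2*2 is not square-free
assert mobius(6) == 1    # 6 = 2*3: even number of prime factors
assert mobius(30) == -1  # 30 = 2*3*5: odd number of prime factors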
696
0
import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class _lowerCamelCase( _a, unittest.TestCase ): lowercase_ : Any = ShapEImgaImgPipeline lowercase_ : Optional[int] = ["""image"""] lowercase_ : Optional[Any] = ["""image"""] lowercase_ : Optional[Any] = [ """num_images_per_prompt""", """num_inference_steps""", """generator""", """latents""", """guidance_scale""", """frame_size""", """output_type""", """return_dict""", ] lowercase_ : Optional[int] = False @property def UpperCamelCase ( self) -> Optional[int]: """simple docstring""" return 32 @property def UpperCamelCase ( self) -> Dict: """simple docstring""" return 32 @property def UpperCamelCase ( self) -> Any: """simple docstring""" return self.time_input_dim * 4 @property def UpperCamelCase ( self) -> Union[str, Any]: """simple docstring""" return 8 @property def UpperCamelCase ( self) -> List[Any]: """simple docstring""" torch.manual_seed(0) _lowercase : Optional[Any] = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, ) _lowercase : str = CLIPVisionModel(lowerCamelCase) return model @property def UpperCamelCase ( self) -> Optional[Any]: """simple docstring""" _lowercase : Optional[Any] = CLIPImageProcessor( crop_size=2_24, do_center_crop=lowerCamelCase, do_normalize=lowerCamelCase, do_resize=lowerCamelCase, image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], resample=3, size=2_24, ) return image_processor @property def UpperCamelCase ( self) -> List[Any]: """simple docstring""" torch.manual_seed(0) _lowercase : Tuple = { 'num_attention_heads': 2, 'attention_head_dim': 16, 'embedding_dim': self.time_input_dim, 'num_embeddings': 32, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'embedding_proj_norm_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } _lowercase : Union[str, Any] = PriorTransformer(**lowerCamelCase) return model @property def UpperCamelCase ( self) -> Any: """simple docstring""" torch.manual_seed(0) _lowercase : str = { 'param_shapes': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 12, 'background': ( 0.1, 0.1, 0.1, ), } _lowercase : Tuple = ShapERenderer(**lowerCamelCase) return model def UpperCamelCase ( self) -> Any: """simple docstring""" _lowercase : str = self.dummy_prior _lowercase : Optional[Any] = self.dummy_image_encoder _lowercase : List[str] = self.dummy_image_processor _lowercase : Union[str, Any] = self.dummy_renderer _lowercase : Optional[int] = HeunDiscreteScheduler( beta_schedule='exp', num_train_timesteps=10_24, prediction_type='sample', 
use_karras_sigmas=lowerCamelCase, clip_sample=lowerCamelCase, clip_sample_range=1.0, ) _lowercase : Union[str, Any] = { 'prior': prior, 'image_encoder': image_encoder, 'image_processor': image_processor, 'renderer': renderer, 'scheduler': scheduler, } return components def UpperCamelCase ( self, lowerCamelCase, lowerCamelCase=0) -> Dict: """simple docstring""" _lowercase : int = floats_tensor((1, 3, 64, 64), rng=random.Random(lowerCamelCase)).to(lowerCamelCase) if str(lowerCamelCase).startswith('mps'): _lowercase : List[Any] = torch.manual_seed(lowerCamelCase) else: _lowercase : List[str] = torch.Generator(device=lowerCamelCase).manual_seed(lowerCamelCase) _lowercase : List[Any] = { 'image': input_image, 'generator': generator, 'num_inference_steps': 1, 'frame_size': 32, 'output_type': 'np', } return inputs def UpperCamelCase ( self) -> Any: """simple docstring""" _lowercase : List[Any] = 'cpu' _lowercase : List[str] = self.get_dummy_components() _lowercase : Optional[Any] = self.pipeline_class(**lowerCamelCase) _lowercase : List[Any] = pipe.to(lowerCamelCase) pipe.set_progress_bar_config(disable=lowerCamelCase) _lowercase : Dict = pipe(**self.get_dummy_inputs(lowerCamelCase)) _lowercase : str = output.images[0] _lowercase : Any = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) _lowercase : Tuple = np.array( [ 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, 0.0_0_0_3_9_2_1_6, ]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1E-2 def UpperCamelCase ( self) -> List[str]: """simple docstring""" self._test_inference_batch_consistent(batch_sizes=[1, 2]) def UpperCamelCase ( self) -> Optional[Any]: """simple docstring""" _lowercase : Tuple = torch_device == 'cpu' _lowercase : Optional[Any] = True self._test_inference_batch_single_identical( batch_size=2, test_max_difference=lowerCamelCase, relax_max_difference=lowerCamelCase, ) def UpperCamelCase ( self) -> Tuple: """simple docstring""" _lowercase : Union[str, Any] = self.get_dummy_components() _lowercase : List[Any] = self.pipeline_class(**lowerCamelCase) _lowercase : Tuple = pipe.to(lowerCamelCase) pipe.set_progress_bar_config(disable=lowerCamelCase) _lowercase : List[Any] = 1 _lowercase : Union[str, Any] = 2 _lowercase : List[str] = self.get_dummy_inputs(lowerCamelCase) for key in inputs.keys(): if key in self.batch_params: _lowercase : List[Any] = batch_size * [inputs[key]] _lowercase : Dict = pipe(**lowerCamelCase, num_images_per_prompt=lowerCamelCase)[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class _lowerCamelCase( unittest.TestCase ): def UpperCamelCase ( self) -> Optional[Any]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase ( self) -> str: """simple docstring""" _lowercase : Dict = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png') _lowercase : Any = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_img2img_out.npy') _lowercase : Any = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img') _lowercase : Optional[int] = pipe.to(lowerCamelCase) pipe.set_progress_bar_config(disable=lowerCamelCase) _lowercase : str = torch.Generator(device=lowerCamelCase).manual_seed(0) _lowercase : int = pipe( lowerCamelCase, generator=lowerCamelCase, guidance_scale=3.0, 
num_inference_steps=64, frame_size=64, output_type='np', ).images[0] assert images.shape == (20, 64, 64, 3) assert_mean_pixel_difference(lowerCamelCase, lowerCamelCase)
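These tests pin randomness through explicit generators; a standalone illustration of that pattern (the MPS branch mirrors the fallback used in get_dummy_inputs above):

import torch


def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if device.startswith("mps"):
        return torch.manual_seed(seed)  # MPS is seeded via the global generator
    return torch.Generator(device=device).manual_seed(seed)


g1 = make_generator("cpu")
g2 = make_generator("cpu")
# Identical seeds produce identical draws, which is what makes the
# expected-slice assertions in the tests reproducible.
assert torch.equal(torch.randn(3, generator=g1), torch.randn(3, generator=g2))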
89
import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPTaConfig from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 __lowerCAmelCase : int ={ 'return_dict': False, 'output_hidden_states': True, 'output_attentions': True, 'torchscript': True, 'torch_dtype': 'float16', 'use_bfloat16': True, 'tf_legacy_loss': True, 'pruned_heads': {'a': 1}, 'tie_word_embeddings': False, 'is_decoder': True, 'cross_attention_hidden_size': 1_2_8, 'add_cross_attention': True, 'tie_encoder_decoder': True, 'max_length': 5_0, 'min_length': 3, 'do_sample': True, 'early_stopping': True, 'num_beams': 3, 'num_beam_groups': 3, 'diversity_penalty': 0.5, 'temperature': 2.0, 'top_k': 1_0, 'top_p': 0.7, 'typical_p': 0.2, 'repetition_penalty': 0.8, 'length_penalty': 0.8, 'no_repeat_ngram_size': 5, 'encoder_no_repeat_ngram_size': 5, 'bad_words_ids': [1, 2, 3], 'num_return_sequences': 3, 'chunk_size_feed_forward': 5, 'output_scores': True, 'return_dict_in_generate': True, 'forced_bos_token_id': 2, 'forced_eos_token_id': 3, 'remove_invalid_values': True, 'architectures': ['BertModel'], 'finetuning_task': 'translation', 'id2label': {0: 'label'}, 'label2id': {'label': '0'}, 'tokenizer_class': 'BertTokenizerFast', 'prefix': 'prefix', 'bos_token_id': 6, 'pad_token_id': 7, 'eos_token_id': 8, 'sep_token_id': 9, 'decoder_start_token_id': 1_0, 'exponential_decay_length_penalty': (5, 1.0_1), 'suppress_tokens': [0, 1], 'begin_suppress_tokens': 2, 'task_specific_params': {'translation': 'some_params'}, 'problem_type': 'regression', } @is_staging_test class _lowercase ( unittest.TestCase ): '''simple docstring''' @classmethod def __magic_name__( cls :Optional[Any] ) -> Any: __SCREAMING_SNAKE_CASE : str = TOKEN HfFolder.save_token(lowerCAmelCase__ ) @classmethod def __magic_name__( cls :List[str] ) -> List[str]: try: delete_repo(token=cls._token , repo_id='''test-config''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-config''' ) except HTTPError: pass def __magic_name__( self :Any ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE : int = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) config.push_to_hub('''test-config''' , use_auth_token=self._token ) __SCREAMING_SNAKE_CASE : Tuple = BertConfig.from_pretrained(f'''{USER}/test-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-config''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCAmelCase__ , repo_id='''test-config''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token ) __SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) def 
__magic_name__( self :int ) -> Optional[Any]: __SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token ) __SCREAMING_SNAKE_CASE : Any = BertConfig.from_pretrained('''valid_org/test-config-org''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-config-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowerCAmelCase__ , repo_id='''valid_org/test-config-org''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token ) __SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''valid_org/test-config-org''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) def __magic_name__( self :Dict ) -> Optional[int]: CustomConfig.register_for_auto_class() __SCREAMING_SNAKE_CASE : Tuple = CustomConfig(attribute=42 ) config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''} ) __SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''' , trust_remote_code=lowerCAmelCase__ ) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''' ) self.assertEqual(new_config.attribute , 42 ) class _lowercase ( unittest.TestCase ): '''simple docstring''' def __magic_name__( self :List[str] ) -> Dict: __SCREAMING_SNAKE_CASE : Optional[Any] = GPTaConfig() # attempt to modify each of int/float/bool/str config records and verify they were updated __SCREAMING_SNAKE_CASE : Optional[Any] = c.n_embd + 1 # int __SCREAMING_SNAKE_CASE : Optional[Any] = c.resid_pdrop + 1.0 # float __SCREAMING_SNAKE_CASE : Dict = not c.scale_attn_weights # bool __SCREAMING_SNAKE_CASE : Optional[int] = c.summary_type + '''foo''' # str c.update_from_string( f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' ) self.assertEqual(lowerCAmelCase__ , c.n_embd , '''mismatch for key: n_embd''' ) self.assertEqual(lowerCAmelCase__ , c.resid_pdrop , '''mismatch for key: resid_pdrop''' ) self.assertEqual(lowerCAmelCase__ , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''' ) self.assertEqual(lowerCAmelCase__ , c.summary_type , '''mismatch for key: summary_type''' ) def __magic_name__( self :Dict ) -> str: __SCREAMING_SNAKE_CASE : Dict = PretrainedConfig() __SCREAMING_SNAKE_CASE : str = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. 
self.assertListEqual(
            lowerCAmelCase__ , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] )

        __SCREAMING_SNAKE_CASE : List[Any] = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCAmelCase__ , lowerCAmelCase__ )]
        if len(lowerCAmelCase__ ) > 0:
            raise ValueError(
                '''The following keys are set with the default values in'''
                ''' `test_configuration_common.config_common_kwargs`, pick another value for them:'''
                f''' {', '.join(lowerCAmelCase__ )}.''' )

    def __magic_name__( self :Union[str, Any] ) -> List[Any]:
        with self.assertRaises(lowerCAmelCase__ ):
            # config is in subfolder, the following should not work without specifying the subfolder
            __SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' )

        __SCREAMING_SNAKE_CASE : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' )

        self.assertIsNotNone(lowerCAmelCase__ )

    def __magic_name__( self :List[Any] ) -> Optional[Any]:
        # A mock response for an HTTP head request to emulate server down
        __SCREAMING_SNAKE_CASE : Union[str, Any] = mock.Mock()
        __SCREAMING_SNAKE_CASE : List[Any] = 500
        __SCREAMING_SNAKE_CASE : Union[str, Any] = {}
        __SCREAMING_SNAKE_CASE : Optional[Any] = HTTPError
        __SCREAMING_SNAKE_CASE : str = {}

        # Download this model to make sure it's in the cache.
        __SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch('''requests.Session.request''' , return_value=lowerCAmelCase__ ) as mock_head:
            __SCREAMING_SNAKE_CASE : Optional[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
            # This checks that we did call the fake head request
            mock_head.assert_called()

    def __magic_name__( self :Union[str, Any] ) -> List[Any]:
        # This test is for deprecated behavior and can be removed in v5
        __SCREAMING_SNAKE_CASE : Optional[int] = BertConfig.from_pretrained(
            '''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' )

    def __magic_name__( self :str ) -> List[str]:
        __SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained('''bert-base-cased''' )
        __SCREAMING_SNAKE_CASE : Union[str, Any] = ['''config.4.0.0.json''']

        with tempfile.TemporaryDirectory() as tmp_dir:
            configuration.save_pretrained(lowerCAmelCase__ )
            __SCREAMING_SNAKE_CASE : List[Any] = 2
            json.dump(configuration.to_dict() , open(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , '''w''' ) )

            # This should pick the new configuration file as the version of Transformers is > 4.0.0
            __SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(lowerCAmelCase__ )
            self.assertEqual(new_configuration.hidden_size , 2 )

            # Will need to be adjusted if we reach v42 and this test is still here.
            # Should pick the old configuration file as the version of Transformers is < 4.42.0
            __SCREAMING_SNAKE_CASE : List[Any] = ['''config.42.0.0.json''']
            __SCREAMING_SNAKE_CASE : Tuple = 768
            configuration.save_pretrained(lowerCAmelCase__ )
            shutil.move(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , os.path.join(lowerCAmelCase__ , '''config.42.0.0.json''' ) )
            __SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(lowerCAmelCase__ )
            self.assertEqual(new_configuration.hidden_size , 768 )

    def __magic_name__( self :List[str] ) -> Union[str, Any]:
        # This repo has two configuration files, one for v4.0.0 and above with a different hidden size.
        __SCREAMING_SNAKE_CASE : Union[str, Any] = '''hf-internal-testing/test-two-configs'''

        import transformers as new_transformers

        __SCREAMING_SNAKE_CASE : int = '''v4.0.0'''
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = new_transformers.models.auto.AutoConfig.from_pretrained(
            lowerCAmelCase__ , return_unused_kwargs=lowerCAmelCase__ )
        self.assertEqual(new_configuration.hidden_size , 2 )
        # This checks `_configuration_file` is not kept in the kwargs by mistake.
        self.assertDictEqual(lowerCAmelCase__ , {} )

        # Testing an older version by monkey-patching the version in the module where it's used.
        import transformers as old_transformers

        __SCREAMING_SNAKE_CASE : List[str] = '''v3.0.0'''
        __SCREAMING_SNAKE_CASE : Any = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCAmelCase__ )
        self.assertEqual(old_configuration.hidden_size , 768 )
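# --- Added example ----------------------------------------------------------
# A minimal, hedged sketch of the `update_from_string` round-trip the tests
# above exercise (assumes a working `transformers` install; the attribute
# values below are illustrative, not taken from the test file). Each value is
# parsed back into the type of the existing attribute:
from transformers import GPT2Config

c = GPT2Config()
c.update_from_string("n_embd=769,resid_pdrop=1.1,scale_attn_weights=False,summary_type=cls_indexfoo")
assert c.n_embd == 769                   # parsed back to int
assert abs(c.resid_pdrop - 1.1) < 1e-9   # parsed back to float
assert c.scale_attn_weights is False     # parsed back to bool
assert c.summary_type == "cls_indexfoo"  # kept as str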
696
0
'''simple docstring''' # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class a__ ( a__ ): '''simple docstring''' lowercase__ : List[str] = "facebook/bart-large-mnli" lowercase__ : str = ( "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which " "should be the text to classify, and `labels`, which should be the list of labels to use for classification. " "It returns the most likely label in the list of provided `labels` for the input text." ) lowercase__ : List[str] = "text_classifier" lowercase__ : List[str] = AutoTokenizer lowercase__ : int = AutoModelForSequenceClassification lowercase__ : Optional[Any] = ["text", ["text"]] lowercase__ : str = ["text"] def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]: super().setup() lowerCAmelCase__ = self.model.config lowerCAmelCase__ = -1 for idx, label in config.idalabel.items(): if label.lower().startswith('''entail''' ): lowerCAmelCase__ = int(lowerCamelCase_ ) if self.entailment_id == -1: raise ValueError('''Could not determine the entailment ID from the model config, please pass it at init.''' ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> Any: lowerCAmelCase__ = labels return self.pre_processor( [text] * len(lowerCamelCase_ ) , [F"""This example is {label}""" for label in labels] , return_tensors='''pt''' , padding='''max_length''' , ) def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ ) -> str: lowerCAmelCase__ = outputs.logits lowerCAmelCase__ = torch.argmax(logits[:, 2] ).item() return self._labels[label_id]
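# --- Added usage sketch -------------------------------------------------------
# The class above is stored under an obfuscated name (a__); upstream it is
# transformers' TextClassificationTool. A hedged usage sketch (PipelineTool
# instances are callable, and the model weights are downloaded on first use):
from transformers.tools import TextClassificationTool

classifier = TextClassificationTool()
print(classifier("This is a super nice API!", labels=["positive", "negative"]))
# expected output: "positive"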
90
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __lowerCAmelCase : Any ={ 'configuration_llama': ['LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LlamaConfig'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : int =['LlamaTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Union[str, Any] =['LlamaTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : str =[ 'LlamaForCausalLM', 'LlamaModel', 'LlamaPreTrainedModel', 'LlamaForSequenceClassification', ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys __lowerCAmelCase : Optional[int] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
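# --- Added usage sketch -------------------------------------------------------
# What the _LazyModule wiring above enables: importing the package is cheap,
# and each backend-specific symbol is resolved on first attribute access
# (sketch assumes torch and tokenizers are installed):
from transformers.models.llama import LlamaConfig, LlamaTokenizerFast

config = LlamaConfig()           # plain config object, no heavy backend needed
print(LlamaTokenizerFast.__name__)  # resolved lazily from tokenization_llama_fast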
696
0
"""simple docstring""" def _snake_case ( snake_case__ : list ): if not grid or not grid[0]: raise TypeError('The grid does not contain the appropriate information' ) for cell_n in range(1 , len(grid[0] ) ): grid[0][cell_n] += grid[0][cell_n - 1] A = grid[0] for row_n in range(1 , len(snake_case__ ) ): A = grid[row_n] A = fill_row(snake_case__ , snake_case__ ) A = grid[row_n] return grid[-1][-1] def _snake_case ( snake_case__ : list , snake_case__ : list ): current_row[0] += row_above[0] for cell_n in range(1 , len(snake_case__ ) ): current_row[cell_n] += min(current_row[cell_n - 1] , row_above[cell_n] ) return current_row if __name__ == "__main__": import doctest doctest.testmod()
91
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__lowerCAmelCase : Dict =logging.get_logger(__name__)

__lowerCAmelCase : List[Any] ={
    'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json',
}


class _lowercase ( A__ ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE__ : List[str] = '''switch_transformers'''
    SCREAMING_SNAKE_CASE__ : Optional[int] = ['''past_key_values''']
    SCREAMING_SNAKE_CASE__ : str = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''}

    def __init__( self :Optional[int] , lowerCAmelCase__ :Union[str, Any]=32_128 , lowerCAmelCase__ :int=768 , lowerCAmelCase__ :Optional[Any]=64 , lowerCAmelCase__ :List[str]=2_048 , lowerCAmelCase__ :Optional[int]=64 , lowerCAmelCase__ :Union[str, Any]=12 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :Optional[int]=3 , lowerCAmelCase__ :Optional[int]=12 , lowerCAmelCase__ :Optional[Any]=8 , lowerCAmelCase__ :Tuple=False , lowerCAmelCase__ :List[Any]=0.01 , lowerCAmelCase__ :Any="float32" , lowerCAmelCase__ :int=False , lowerCAmelCase__ :int=32 , lowerCAmelCase__ :Optional[Any]=128 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :str=1E-6 , lowerCAmelCase__ :Tuple=0.001 , lowerCAmelCase__ :List[Any]=0.001 , lowerCAmelCase__ :Union[str, Any]=1.0 , lowerCAmelCase__ :Tuple="relu" , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :Optional[int]=False , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]=0 , lowerCAmelCase__ :Union[str, Any]=1 , **lowerCAmelCase__ :List[str] , ) -> Tuple:
        __SCREAMING_SNAKE_CASE : Any = vocab_size
        __SCREAMING_SNAKE_CASE : Union[str, Any] = d_model
        __SCREAMING_SNAKE_CASE : Optional[int] = d_kv
        __SCREAMING_SNAKE_CASE : Tuple = d_ff
        __SCREAMING_SNAKE_CASE : Tuple = num_sparse_encoder_layers
        __SCREAMING_SNAKE_CASE : List[Any] = num_layers
        __SCREAMING_SNAKE_CASE : Union[str, Any] = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        __SCREAMING_SNAKE_CASE : Optional[Any] = num_sparse_decoder_layers

        # This tells us how often (i.e. every how many encoder layers) we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            __SCREAMING_SNAKE_CASE : List[Any] = self.num_layers // self.num_sparse_encoder_layers
        else:
            __SCREAMING_SNAKE_CASE : Tuple = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us how often (i.e. every how many decoder layers) we'll have to set a sparse layer.
if self.num_sparse_decoder_layers > 0: __SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_decoder_layers // self.num_sparse_decoder_layers else: __SCREAMING_SNAKE_CASE : Dict = self.num_decoder_layers # HACK: this will create 0 sparse layers __SCREAMING_SNAKE_CASE : List[Any] = num_heads __SCREAMING_SNAKE_CASE : List[Any] = num_experts __SCREAMING_SNAKE_CASE : Tuple = expert_capacity __SCREAMING_SNAKE_CASE : List[Any] = router_bias __SCREAMING_SNAKE_CASE : Optional[Any] = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' ) __SCREAMING_SNAKE_CASE : List[Any] = router_dtype __SCREAMING_SNAKE_CASE : Optional[Any] = router_ignore_padding_tokens __SCREAMING_SNAKE_CASE : int = relative_attention_num_buckets __SCREAMING_SNAKE_CASE : Any = relative_attention_max_distance __SCREAMING_SNAKE_CASE : Union[str, Any] = dropout_rate __SCREAMING_SNAKE_CASE : Dict = layer_norm_epsilon __SCREAMING_SNAKE_CASE : int = initializer_factor __SCREAMING_SNAKE_CASE : List[str] = feed_forward_proj __SCREAMING_SNAKE_CASE : Any = use_cache __SCREAMING_SNAKE_CASE : Union[str, Any] = add_router_probs __SCREAMING_SNAKE_CASE : int = router_z_loss_coef __SCREAMING_SNAKE_CASE : List[str] = router_aux_loss_coef __SCREAMING_SNAKE_CASE : Dict = self.feed_forward_proj.split('''-''' ) __SCREAMING_SNAKE_CASE : Optional[int] = act_info[-1] __SCREAMING_SNAKE_CASE : Optional[Any] = act_info[0] == '''gated''' if len(lowerCAmelCase__ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase__ ) > 2: raise ValueError( f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.''' '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ''' '''\'gated-gelu\' or \'relu\'''' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": __SCREAMING_SNAKE_CASE : List[Any] = '''gelu_new''' super().__init__( pad_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ , )
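# --- Added worked example -------------------------------------------------------
# The sparse-layer spacing computed above, in isolation: with num_layers=12 and
# num_sparse_encoder_layers=3, a sparse (MoE) block takes the place of a dense
# FFN every 12 // 3 = 4 layers, and 0 sparse layers degenerates to a step equal
# to num_layers, i.e. no sparse block at all. (How the modeling code consumes
# the step is an assumption here; the arithmetic mirrors the config logic.)
num_layers, num_sparse_encoder_layers = 12, 3
encoder_sparse_step = num_layers // num_sparse_encoder_layers if num_sparse_encoder_layers > 0 else num_layers
sparse_positions = [i for i in range(1, num_layers + 1) if i % encoder_sparse_step == 0]
print(encoder_sparse_step, sparse_positions)  # 4 [4, 8, 12]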
696
0
'''simple docstring''' def _lowerCAmelCase ( __magic_name__ : Optional[int] ) -> Union[str, Any]: return [ { 0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7], }, { 0: [6], 1: [9], 2: [4, 5], 3: [4], 4: [2, 3], 5: [2], 6: [0, 7], 7: [6], 8: [], 9: [1], }, { 0: [4], 1: [6], 2: [], 3: [5, 6, 7], 4: [0, 6], 5: [3, 8, 9], 6: [1, 3, 4, 7], 7: [3, 6, 8, 9], 8: [5, 7], 9: [5, 7], }, { 0: [1, 3], 1: [0, 2, 4], 2: [1, 3, 4], 3: [0, 2, 4], 4: [1, 2, 3], }, ][index] def _lowerCAmelCase ( __magic_name__ : dict[int, list[int]] ) -> list[tuple[int, int]]: lowercase : Union[str, Any] =0 lowercase : Tuple =len(__magic_name__ ) # No of vertices in graph lowercase : Optional[Any] =[0] * n lowercase : List[str] =[False] * n def dfs(__magic_name__ : Optional[Any] , __magic_name__ : Dict , __magic_name__ : int , __magic_name__ : Optional[int] ): lowercase : List[str] =True lowercase : Union[str, Any] =id_ id_ += 1 for to in graph[at]: if to == parent: pass elif not visited[to]: dfs(__magic_name__ , __magic_name__ , __magic_name__ , id_ ) lowercase : Dict =min(low[at] , low[to] ) if id_ <= low[to]: bridges.append((at, to) if at < to else (to, at) ) else: # This edge is a back edge and cannot be a bridge lowercase : Optional[int] =min(low[at] , low[to] ) lowercase : list[tuple[int, int]] =[] for i in range(__magic_name__ ): if not visited[i]: dfs(__magic_name__ , -1 , __magic_name__ , id_ ) return bridges if __name__ == "__main__": import doctest doctest.testmod()
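# --- Added worked example -------------------------------------------------------
# Self-contained sketch of what the DFS above computes, written as the standard
# Tarjan-style bridge finder rather than a line-for-line de-obfuscation. On
# graph index 0 (a triangle 0-1-2, a tail 2-3-4, and a 4-cycle 5-6-7-8 hanging
# off vertex 2), the bridges are exactly the edges whose removal disconnects
# the graph:
def find_bridges(graph: dict) -> list:
    n = len(graph)
    visited = [False] * n
    disc = [0] * n  # discovery times
    low = [0] * n   # lowest discovery time reachable via at most one back edge
    bridges: list = []
    timer = 0

    def dfs(at: int, parent: int) -> None:
        nonlocal timer
        visited[at] = True
        disc[at] = low[at] = timer
        timer += 1
        for to in graph[at]:
            if to == parent:
                continue
            if visited[to]:
                low[at] = min(low[at], disc[to])  # back edge
            else:
                dfs(to, at)
                low[at] = min(low[at], low[to])
                if low[to] > disc[at]:  # nothing in `to`'s subtree reaches above `at`
                    bridges.append((at, to) if at < to else (to, at))

    for i in range(n):
        if not visited[i]:
            dfs(i, -1)
    return bridges


demo_graph = {0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3],
              5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7]}
assert sorted(find_bridges(demo_graph)) == [(2, 3), (2, 5), (3, 4)]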
92
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[List[np.ndarray], torch.FloatTensor] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
696
0
"""simple docstring""" from typing import Any, Dict, List, Optional, Tuple, Union import torch from torch import nn from torch.utils.data import DistributedSampler, RandomSampler from transformers import PreTrainedModel, Trainer, logging from transformers.integrations import is_fairscale_available from transformers.models.fsmt.configuration_fsmt import FSMTConfig from transformers.optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.trainer_pt_utils import get_tpu_sampler from transformers.training_args import ParallelMode from transformers.utils import is_torch_tpu_available if is_fairscale_available(): from fairscale.optim import OSS __A = logging.get_logger(__name__) __A = { """linear""": get_linear_schedule_with_warmup, """cosine""": get_cosine_schedule_with_warmup, """cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup, """polynomial""": get_polynomial_decay_schedule_with_warmup, """constant""": get_constant_schedule, """constant_w_warmup""": get_constant_schedule_with_warmup, } class _lowerCAmelCase ( a ): """simple docstring""" def __init__( self , __UpperCAmelCase=None , __UpperCAmelCase=None , *__UpperCAmelCase , **__UpperCAmelCase ): '''simple docstring''' super().__init__(*__UpperCAmelCase , **__UpperCAmelCase ) if config is None: assert isinstance(self.model , __UpperCAmelCase ), ( "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is" F" {self.model.__class__}" ) lowerCAmelCase__ :List[str] = self.model.config else: lowerCAmelCase__ :str = config lowerCAmelCase__ :Union[str, Any] = data_args lowerCAmelCase__ :str = self.config.tgt_vocab_size if isinstance(self.config , __UpperCAmelCase ) else self.config.vocab_size if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss): assert self.config.pad_token_id is not None, ( "Make sure that `config.pad_token_id` is correcly defined when ignoring `pad_token` for loss" " calculation or doing label smoothing." ) if self.config.pad_token_id is None and self.config.eos_token_id is not None: logger.warning( F"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for" ' padding..' 
) if self.args.label_smoothing == 0: lowerCAmelCase__ :str = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id ) else: # dynamically import label_smoothed_nll_loss from utils import label_smoothed_nll_loss lowerCAmelCase__ :List[Any] = label_smoothed_nll_loss def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' if self.optimizer is None: lowerCAmelCase__ :Union[str, Any] = ['bias', 'LayerNorm.weight'] lowerCAmelCase__ :Dict = [ { 'params': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )], 'weight_decay': self.args.weight_decay, }, { 'params': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )], 'weight_decay': 0.0, }, ] lowerCAmelCase__ :Any = Adafactor if self.args.adafactor else AdamW if self.args.adafactor: lowerCAmelCase__ :Optional[Any] = Adafactor lowerCAmelCase__ :Optional[Any] = {'scale_parameter': False, 'relative_step': False} else: lowerCAmelCase__ :Optional[Any] = AdamW lowerCAmelCase__ :int = { 'betas': (self.args.adam_betaa, self.args.adam_betaa), 'eps': self.args.adam_epsilon, } lowerCAmelCase__ :List[str] = self.args.learning_rate if self.sharded_ddp: lowerCAmelCase__ :List[Any] = OSS( params=__UpperCAmelCase , optim=__UpperCAmelCase , **__UpperCAmelCase , ) else: lowerCAmelCase__ :Tuple = optimizer_cls(__UpperCAmelCase , **__UpperCAmelCase ) if self.lr_scheduler is None: lowerCAmelCase__ :str = self._get_lr_scheduler(__UpperCAmelCase ) else: # ignoring --lr_scheduler logger.warning('scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.' ) def snake_case ( self , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Tuple = arg_to_scheduler[self.args.lr_scheduler] if self.args.lr_scheduler == "constant": lowerCAmelCase__ :Optional[Any] = schedule_func(self.optimizer ) elif self.args.lr_scheduler == "constant_w_warmup": lowerCAmelCase__ :str = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps ) else: lowerCAmelCase__ :Union[str, Any] = schedule_func( self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=__UpperCAmelCase ) return scheduler def snake_case ( self ): '''simple docstring''' if isinstance(self.train_dataset , torch.utils.data.IterableDataset ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset ) else: if self.args.sortish_sampler: self.train_dataset.make_sortish_sampler( self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , ) return ( RandomSampler(self.train_dataset ) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset ) ) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' if self.args.label_smoothing == 0: if self.data_args is not None and self.data_args.ignore_pad_token_for_loss: # force training to ignore pad token lowerCAmelCase__ :Union[str, Any] = model(**__UpperCAmelCase , use_cache=__UpperCAmelCase )[0] lowerCAmelCase__ :Dict = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) ) else: # compute usual loss via models lowerCAmelCase__ , lowerCAmelCase__ :List[Any] = model(**__UpperCAmelCase , labels=__UpperCAmelCase , use_cache=__UpperCAmelCase )[:2] else: # compute label smoothed loss lowerCAmelCase__ :Optional[Any] = model(**__UpperCAmelCase , use_cache=__UpperCAmelCase )[0] lowerCAmelCase__ :Optional[Any] = torch.nn.functional.log_softmax(__UpperCAmelCase , dim=-1 ) lowerCAmelCase__ , lowerCAmelCase__ :str = 
self.loss_fn(__UpperCAmelCase , __UpperCAmelCase , self.args.label_smoothing , ignore_index=self.config.pad_token_id ) return loss, logits def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Optional[int] = inputs.pop('labels' ) lowerCAmelCase__ , lowerCAmelCase__ :Union[str, Any] = self._compute_loss(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) return loss def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , ): '''simple docstring''' lowerCAmelCase__ :Dict = self._prepare_inputs(__UpperCAmelCase ) lowerCAmelCase__ :List[Any] = { 'max_length': self.data_args.val_max_target_length if self.data_args is not None else self.config.max_length, 'num_beams': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams, } if self.args.predict_with_generate and not self.args.prediction_loss_only: lowerCAmelCase__ :int = self.model.generate( inputs['input_ids'] , attention_mask=inputs['attention_mask'] , **__UpperCAmelCase , ) # in case the batch is shorter than max length, the output should be padded if generated_tokens.shape[-1] < gen_kwargs["max_length"]: lowerCAmelCase__ :Optional[Any] = self._pad_tensors_to_max_len(__UpperCAmelCase , gen_kwargs['max_length'] ) lowerCAmelCase__ :int = inputs.pop('labels' ) with torch.no_grad(): # compute loss on predict data lowerCAmelCase__ , lowerCAmelCase__ :int = self._compute_loss(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) lowerCAmelCase__ :Optional[int] = loss.mean().detach() if self.args.prediction_loss_only: return (loss, None, None) lowerCAmelCase__ :Optional[Any] = generated_tokens if self.args.predict_with_generate else logits if labels.shape[-1] < gen_kwargs["max_length"]: lowerCAmelCase__ :Optional[int] = self._pad_tensors_to_max_len(__UpperCAmelCase , gen_kwargs['max_length'] ) return (loss, logits, labels) def snake_case ( self , __UpperCAmelCase , __UpperCAmelCase ): '''simple docstring''' lowerCAmelCase__ :Union[str, Any] = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id if pad_token_id is None: raise ValueError( 'Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be' F" padded to `max_length`={max_length}" ) lowerCAmelCase__ :List[Any] = pad_token_id * torch.ones( (tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device ) lowerCAmelCase__ :Any = tensor return padded_tensor
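# --- Added standalone sketch ------------------------------------------------------
# What _pad_tensors_to_max_len above does, with the variable renaming undone (the
# copy of `tensor` into the padded buffer is hidden behind `lowerCAmelCase__ :Any
# = tensor`); the function name and layout here are a hypothetical reconstruction:
import torch


def pad_tensors_to_max_len(tensor: torch.Tensor, max_length: int, pad_token_id: int) -> torch.Tensor:
    padded = pad_token_id * torch.ones(
        (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
    )
    padded[:, : tensor.shape[-1]] = tensor  # left-align the real tokens, pad the rest
    return padded


print(pad_tensors_to_max_len(torch.tensor([[5, 6, 7]]), 5, 0))  # tensor([[5, 6, 7, 0, 0]])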
93
from datetime import datetime import requests def _UpperCamelCase ( lowercase__ ): __SCREAMING_SNAKE_CASE : Optional[int] = '''https://downloadgram.net/wp-json/wppress/video-downloader/video?url=''' __SCREAMING_SNAKE_CASE : Tuple = requests.get(base_url + url ).json()[0]['''urls'''][0]['''src'''] return requests.get(lowercase__ ).content if __name__ == "__main__": __lowerCAmelCase : int =input('Enter Video/IGTV url: ').strip() __lowerCAmelCase : Union[str, Any] =f"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4""" with open(file_name, 'wb') as fp: fp.write(download_video(url)) print(f"""Done. Video saved to disk as {file_name}.""")
696
0
'''simple docstring''' import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def __init__( self : List[Any] , UpperCAmelCase : Optional[Any] , UpperCAmelCase : List[str]=13 , UpperCAmelCase : List[Any]=7 , UpperCAmelCase : Any=True , UpperCAmelCase : Optional[Any]=True , UpperCAmelCase : int=True , UpperCAmelCase : Optional[int]=True , UpperCAmelCase : List[str]=99 , UpperCAmelCase : Dict=32 , UpperCAmelCase : List[Any]=5 , UpperCAmelCase : Union[str, Any]=4 , UpperCAmelCase : int=37 , UpperCAmelCase : Optional[int]="gelu" , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : str=0.1 , UpperCAmelCase : Dict=512 , UpperCAmelCase : str=16 , UpperCAmelCase : Dict=2 , UpperCAmelCase : Dict=0.0_2 , UpperCAmelCase : List[Any]=4 , ) -> Union[str, Any]: '''simple docstring''' lowercase : Tuple =parent lowercase : Any =batch_size lowercase : Union[str, Any] =seq_length lowercase : Optional[Any] =is_training lowercase : List[str] =use_attention_mask lowercase : Dict =use_token_type_ids lowercase : List[Any] =use_labels lowercase : List[Any] =vocab_size lowercase : List[str] =hidden_size lowercase : Dict =num_hidden_layers lowercase : List[str] =num_attention_heads lowercase : Dict =intermediate_size lowercase : List[Any] =hidden_act lowercase : List[str] =hidden_dropout_prob lowercase : Dict =attention_probs_dropout_prob lowercase : Tuple =max_position_embeddings lowercase : List[str] =type_vocab_size lowercase : Union[str, Any] =type_sequence_label_size lowercase : List[Any] =initializer_range lowercase : List[str] =num_choices def A__ ( self : Tuple ) -> Optional[int]: '''simple docstring''' lowercase : int =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase : Dict =None if self.use_attention_mask: lowercase : str =random_attention_mask([self.batch_size, self.seq_length] ) lowercase : Optional[int] =None if self.use_token_type_ids: lowercase : Dict =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase : Optional[int] =RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCAmelCase , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def A__ ( self : str ) -> Optional[int]: '''simple docstring''' lowercase : Tuple =self.prepare_config_and_inputs() lowercase , lowercase , lowercase , lowercase : Tuple =config_and_inputs lowercase : Tuple ={'''input_ids''': input_ids, '''token_type_ids''': 
token_type_ids, '''attention_mask''': attention_mask} return config, inputs_dict def A__ ( self : str ) -> int: '''simple docstring''' lowercase : int =self.prepare_config_and_inputs() lowercase , lowercase , lowercase , lowercase : Any =config_and_inputs lowercase : Optional[Any] =True lowercase : str =floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowercase : str =ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class UpperCAmelCase_ ( __A , unittest.TestCase ): """simple docstring""" UpperCamelCase_ = True UpperCamelCase_ = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def A__ ( self : List[str] ) -> List[str]: '''simple docstring''' lowercase : Optional[Any] =FlaxRobertaPreLayerNormModelTester(self ) @slow def A__ ( self : Union[str, Any] ) -> List[str]: '''simple docstring''' for model_class_name in self.all_model_classes: lowercase : Any =model_class_name.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=UpperCAmelCase ) lowercase : int =model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCAmelCase ) @require_flax class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" @slow def A__ ( self : Optional[int] ) -> List[Any]: '''simple docstring''' lowercase : Optional[Any] =FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=UpperCAmelCase ) lowercase : Any =np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa ) lowercase : List[Any] =model(UpperCAmelCase )[0] lowercase : str =[1, 11, 5_0265] self.assertEqual(list(output.shape ) , UpperCAmelCase ) # compare the actual values for a slice. lowercase : Optional[Any] =np.array( [[[4_0.4_8_8_0, 1_8.0_1_9_9, -5.2_3_6_7], [-1.8_8_7_7, -4.0_8_8_5, 1_0.7_0_8_5], [-2.2_6_1_3, -5.6_1_1_0, 7.2_6_6_5]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 ) ) @slow def A__ ( self : Tuple ) -> Optional[Any]: '''simple docstring''' lowercase : int =FlaxRobertaPreLayerNormModel.from_pretrained('''andreasmadsen/efficient_mlm_m0.40''' , from_pt=UpperCAmelCase ) lowercase : Optional[Any] =np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa ) lowercase : str =model(UpperCAmelCase )[0] # compare the actual values for a slice. lowercase : List[str] =np.array( [[[0.0_2_0_8, -0.0_3_5_6, 0.0_2_3_7], [-0.1_5_6_9, -0.0_4_1_1, -0.2_6_2_6], [0.1_8_7_9, 0.0_1_2_5, -0.0_0_8_9]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , UpperCAmelCase , atol=1e-4 ) )
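# --- Added usage sketch -------------------------------------------------------------
# Hedged, runnable form of the slow integration check above (requires flax and
# downloads the andreasmadsen/efficient_mlm_m0.40 PyTorch weights on first call):
import numpy as np
from transformers import FlaxRobertaPreLayerNormModel

model = FlaxRobertaPreLayerNormModel.from_pretrained("andreasmadsen/efficient_mlm_m0.40", from_pt=True)
hidden_states = model(np.ones((1, 11), dtype="i4"))[0]
print(hidden_states.shape)  # (1, 11, hidden_size)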
94
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _lowercase ( A__ , A__ , A__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = StableDiffusionInstructPixaPixPipeline SCREAMING_SNAKE_CASE__ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''} SCREAMING_SNAKE_CASE__ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS SCREAMING_SNAKE_CASE__ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS SCREAMING_SNAKE_CASE__ : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS def __magic_name__( self :int ) -> Optional[int]: torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) __SCREAMING_SNAKE_CASE : str = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : Any = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : Dict = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) __SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __magic_name__( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any]=0 ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0] __SCREAMING_SNAKE_CASE : List[Any] = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert('''RGB''' ) if str(lowerCAmelCase__ ).startswith('''mps''' ): __SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(lowerCAmelCase__ ) else: __SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, 
'''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''image_guidance_scale''': 1, '''output_type''': '''numpy''', } return inputs def __magic_name__( self :Union[str, Any] ) -> str: __SCREAMING_SNAKE_CASE : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE : Any = self.get_dummy_components() __SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = sd_pipe(**lowerCAmelCase__ ).images __SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE : int = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __magic_name__( self :Tuple ) -> Optional[int]: __SCREAMING_SNAKE_CASE : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components() __SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = '''french fries''' __SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(**lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = output.images __SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __magic_name__( self :Dict ) -> Dict: __SCREAMING_SNAKE_CASE : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_components() __SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(lowerCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Dict = [inputs['''prompt''']] * 2 __SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(inputs['''image'''] ).astype(np.floataa ) / 255.0 __SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowerCAmelCase__ ).unsqueeze(0 ).to(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Union[str, Any] = image / 2 + 0.5 __SCREAMING_SNAKE_CASE : Optional[Any] = image.permute(0 , 3 , 1 , 2 ) __SCREAMING_SNAKE_CASE : Any = image.repeat(2 , 1 , 1 , 1 ) __SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(**lowerCAmelCase__ ).images __SCREAMING_SNAKE_CASE : Dict = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) __SCREAMING_SNAKE_CASE : Tuple = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __magic_name__( self :Union[str, Any] ) -> Dict: __SCREAMING_SNAKE_CASE : Tuple = '''cpu''' # ensure determinism for the device-dependent 
torch.Generator __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components() __SCREAMING_SNAKE_CASE : Union[str, Any] = EulerAncestralDiscreteScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' ) __SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(lowerCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Dict = sd_pipe(**lowerCAmelCase__ ).images __SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE : List[str] = [round(lowerCAmelCase__ , 4 ) for x in image_slice.flatten().tolist()] print(''','''.join([str(lowerCAmelCase__ ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE : List[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __magic_name__( self :Tuple ) -> Optional[int]: super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def __magic_name__( self :str ) -> List[Any]: __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components() __SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : int = VaeImageProcessor(do_resize=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : str = pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Tuple = pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' ) )[0] __SCREAMING_SNAKE_CASE : Union[str, Any] = components['''vae'''] __SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' ) for image_param in self.image_latents_params: if image_param in inputs.keys(): __SCREAMING_SNAKE_CASE : Optional[int] = vae.encode(inputs[image_param] ).latent_dist.mode() __SCREAMING_SNAKE_CASE : Dict = pipe(**lowerCAmelCase__ )[0] __SCREAMING_SNAKE_CASE : List[Any] = np.abs(out - out_latents_inputs ).max() self.assertLess(lowerCAmelCase__ , 1E-4 , '''passing latents as image input generate different result from passing image''' ) @slow @require_torch_gpu class _lowercase ( unittest.TestCase ): '''simple docstring''' def __magic_name__( self :Union[str, Any] ) -> str: super().tearDown() gc.collect() torch.cuda.empty_cache() def __magic_name__( self :int , lowerCAmelCase__ :Dict=0 ) -> Optional[int]: __SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : int = load_image( '''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' ) __SCREAMING_SNAKE_CASE : Dict = { '''prompt''': '''turn him into a cyborg''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''image_guidance_scale''': 1.0, '''output_type''': '''numpy''', } return inputs def __magic_name__( self :Dict ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing() __SCREAMING_SNAKE_CASE : Dict = self.get_inputs() __SCREAMING_SNAKE_CASE : str = pipe(**lowerCAmelCase__ 
).images __SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def __magic_name__( self :Any ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing() __SCREAMING_SNAKE_CASE : Any = self.get_inputs() __SCREAMING_SNAKE_CASE : int = pipe(**lowerCAmelCase__ ).images __SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE : Dict = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def __magic_name__( self :Optional[int] ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Any = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing() __SCREAMING_SNAKE_CASE : str = self.get_inputs() __SCREAMING_SNAKE_CASE : Optional[int] = pipe(**lowerCAmelCase__ ).images __SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE : List[Any] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def __magic_name__( self :Dict ) -> Tuple: __SCREAMING_SNAKE_CASE : List[Any] = 0 def callback_fn(lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :torch.FloatTensor ) -> None: __SCREAMING_SNAKE_CASE : Dict = True nonlocal number_of_steps number_of_steps += 1 if step == 1: __SCREAMING_SNAKE_CASE : Any = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) __SCREAMING_SNAKE_CASE : Tuple = latents[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE : str = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 elif step == 2: __SCREAMING_SNAKE_CASE : Union[str, Any] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) __SCREAMING_SNAKE_CASE : List[str] = latents[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE : str = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 __SCREAMING_SNAKE_CASE : List[str] = False __SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa ) __SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing() __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs() pipe(**lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def 
__magic_name__( self :List[str] ) -> Union[str, Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __SCREAMING_SNAKE_CASE : int = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa ) __SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() __SCREAMING_SNAKE_CASE : Dict = self.get_inputs() __SCREAMING_SNAKE_CASE : List[Any] = pipe(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Tuple = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def __magic_name__( self :int ) -> Tuple: __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 __SCREAMING_SNAKE_CASE : int = inputs['''image'''].resize((504, 504) ) __SCREAMING_SNAKE_CASE : Optional[int] = '''timbrooks/instruct-pix2pix''' __SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained( lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing() __SCREAMING_SNAKE_CASE : Any = pipe(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = output.images[0] __SCREAMING_SNAKE_CASE : str = image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) __SCREAMING_SNAKE_CASE : str = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
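# --- Added usage sketch -------------------------------------------------------------
# Hedged end-to-end sketch of the pipeline the slow tests above exercise (assumes
# diffusers with a CUDA device; the prompt and image URL mirror the test inputs
# in get_inputs above):
import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
).to("cuda")
image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
)
edited = pipe(
    "turn him into a cyborg", image=image, num_inference_steps=10, image_guidance_scale=1.0
).images[0]
edited.save("cyborg.png")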
696
0
"""simple docstring""" import ast import os import re import shutil import tempfile import unittest from unittest import mock import torch from accelerate.test_utils.examples import compare_against_test from accelerate.test_utils.testing import TempDirTestCase, require_trackers, run_command, slow from accelerate.utils import write_basic_config # DataLoaders built from `test_samples/MRPC` for quick testing # Should mock `{script_name}.get_dataloaders` via: # @mock.patch("{script_name}.get_dataloaders", mocked_dataloaders) lowerCamelCase_ = [ '''cross_validation.py''', '''gradient_accumulation.py''', '''local_sgd.py''', '''multi_process_metrics.py''', '''memory.py''', '''automatic_gradient_accumulation.py''', '''fsdp_with_peak_mem_tracking.py''', '''deepspeed_with_config_support.py''', '''megatron_lm_gpt_pretraining.py''', ] class UpperCamelCase_ (unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self : Optional[int] , lowerCAmelCase_ : str , lowerCAmelCase_ : bool , lowerCAmelCase_ : str = None , lowerCAmelCase_ : list = None ) -> Tuple: UpperCAmelCase_ : List[Any] = None UpperCAmelCase_ : int = os.path.abspath(os.path.join("examples" , "by_feature" ) ) UpperCAmelCase_ : int = os.path.abspath("examples" ) for item in os.listdir(lowerCAmelCase_ ): if item not in EXCLUDE_EXAMPLES: UpperCAmelCase_ : int = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) if os.path.isfile(lowerCAmelCase_ ) and ".py" in item_path: with self.subTest( tested_script=lowerCAmelCase_ , feature_script=lowerCAmelCase_ , tested_section="main()" if parser_only else "training_function()" , ): UpperCAmelCase_ : Union[str, Any] = compare_against_test( os.path.join(lowerCAmelCase_ , lowerCAmelCase_ ) , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) UpperCAmelCase_ : int = "\n".join(lowerCAmelCase_ ) if special_strings is not None: for string in special_strings: UpperCAmelCase_ : int = diff.replace(lowerCAmelCase_ , "" ) self.assertEqual(lowerCAmelCase_ , "" ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]: self.one_complete_example("complete_nlp_example.py" , lowerCAmelCase_ ) self.one_complete_example("complete_nlp_example.py" , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]: UpperCAmelCase_ : str = os.path.abspath(os.path.join("examples" , "cv_example.py" ) ) UpperCAmelCase_ : List[str] = [ " " * 16 + "{\n\n", " " * 20 + "\"accuracy\": eval_metric[\"accuracy\"],\n\n", " " * 20 + "\"f1\": eval_metric[\"f1\"],\n\n", " " * 20 + "\"train_loss\": total_loss.item() / len(train_dataloader),\n\n", " " * 20 + "\"epoch\": epoch,\n\n", " " * 16 + "},\n\n", " " * 16 + "step=epoch,\n", " " * 12, " " * 8 + "for step, batch in enumerate(active_dataloader):\n", ] self.one_complete_example("complete_cv_example.py" , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) self.one_complete_example("complete_cv_example.py" , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) @mock.patch.dict(os.environ , {'''TESTING_MOCKED_DATALOADERS''': '''1'''} ) class UpperCamelCase_ (__A ): __magic_name__ = False @classmethod def _SCREAMING_SNAKE_CASE ( cls : List[str] ) -> str: super().setUpClass() UpperCAmelCase_ : Optional[int] = tempfile.mkdtemp() UpperCAmelCase_ : List[str] = os.path.join(cls._tmpdir , "default_config.yml" ) write_basic_config(save_location=cls.configPath ) UpperCAmelCase_ : Tuple = ["accelerate", "launch", "--config_file", cls.configPath] @classmethod def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ) -> Tuple: super().tearDownClass() shutil.rmtree(cls._tmpdir ) def 
_SCREAMING_SNAKE_CASE ( self : int ) -> Dict: UpperCAmelCase_ : Optional[int] = f""" examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir {self.tmpdir} """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "epoch_0" ) ) ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: UpperCAmelCase_ : List[Any] = f""" examples/by_feature/checkpointing.py --checkpointing_steps 1 --output_dir {self.tmpdir} """.split() UpperCAmelCase_ : List[str] = run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(self.tmpdir , "step_2" ) ) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]: UpperCAmelCase_ : List[str] = f""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , 'epoch_0' )} """.split() UpperCAmelCase_ : int = run_command(self._launch_args + testargs , return_stdout=lowerCAmelCase_ ) self.assertNotIn("epoch 0:" , lowerCAmelCase_ ) self.assertIn("epoch 1:" , lowerCAmelCase_ ) def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any: UpperCAmelCase_ : Dict = f""" examples/by_feature/checkpointing.py --resume_from_checkpoint {os.path.join(self.tmpdir , 'step_2' )} """.split() UpperCAmelCase_ : Dict = run_command(self._launch_args + testargs , return_stdout=lowerCAmelCase_ ) if torch.cuda.is_available(): UpperCAmelCase_ : Optional[int] = torch.cuda.device_count() else: UpperCAmelCase_ : List[Any] = 1 if num_processes > 1: self.assertNotIn("epoch 0:" , lowerCAmelCase_ ) self.assertIn("epoch 1:" , lowerCAmelCase_ ) else: self.assertIn("epoch 0:" , lowerCAmelCase_ ) self.assertIn("epoch 1:" , lowerCAmelCase_ ) @slow def _SCREAMING_SNAKE_CASE ( self : Any ) -> str: UpperCAmelCase_ : int = "\n examples/by_feature/cross_validation.py\n --num_folds 2\n ".split() with mock.patch.dict(os.environ , {"TESTING_MOCKED_DATALOADERS": "0"} ): UpperCAmelCase_ : List[Any] = run_command(self._launch_args + testargs , return_stdout=lowerCAmelCase_ ) UpperCAmelCase_ : Dict = re.findall("({.+})" , lowerCAmelCase_ ) UpperCAmelCase_ : Tuple = [r for r in results if "accuracy" in r][-1] UpperCAmelCase_ : Dict = ast.literal_eval(lowerCAmelCase_ ) self.assertGreaterEqual(results["accuracy"] , 0.7_5 ) def _SCREAMING_SNAKE_CASE ( self : int ) -> Union[str, Any]: UpperCAmelCase_ : Any = ["examples/by_feature/multi_process_metrics.py"] run_command(self._launch_args + testargs ) @require_trackers @mock.patch.dict(os.environ , {"WANDB_MODE": "offline"} ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: with tempfile.TemporaryDirectory() as tmpdir: UpperCAmelCase_ : Optional[int] = f""" examples/by_feature/tracking.py --with_tracking --project_dir {tmpdir} """.split() run_command(self._launch_args + testargs ) self.assertTrue(os.path.exists(os.path.join(lowerCAmelCase_ , "tracking" ) ) ) def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple: UpperCAmelCase_ : Dict = ["examples/by_feature/gradient_accumulation.py"] run_command(self._launch_args + testargs ) def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: UpperCAmelCase_ : int = ["examples/by_feature/local_sgd.py"] run_command(self._launch_args + testargs )
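# --- Added usage sketch -------------------------------------------------------------
# What one test invocation above boils down to, assembled the same way as in
# setUpClass (the config path and output dir here are hypothetical):
launch_args = ["accelerate", "launch", "--config_file", "default_config.yml"]
testargs = "examples/by_feature/checkpointing.py --checkpointing_steps epoch --output_dir /tmp/ckpt".split()
print(" ".join(launch_args + testargs))
# run_command(launch_args + testargs) would then shell out to:
#   accelerate launch --config_file default_config.yml examples/by_feature/checkpointing.py \
#       --checkpointing_steps epoch --output_dir /tmp/ckpt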
95
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion # and https://github.com/hojonathanho/diffusion import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.schedulers.scheduling_utils import SchedulerMixin from diffusers.utils import BaseOutput, deprecate @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : torch.FloatTensor SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None def _UpperCamelCase ( lowercase__ , lowercase__=0.999 , lowercase__="cosine" , ): if alpha_transform_type == "cosine": def alpha_bar_fn(lowercase__ ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(lowercase__ ): return math.exp(t * -12.0 ) else: raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) __SCREAMING_SNAKE_CASE : List[Any] = [] for i in range(lowercase__ ): __SCREAMING_SNAKE_CASE : Optional[Any] = i / num_diffusion_timesteps __SCREAMING_SNAKE_CASE : List[Any] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(lowercase__ ) / alpha_bar_fn(lowercase__ ) , lowercase__ ) ) return torch.tensor(lowercase__ , dtype=torch.floataa ) class _lowercase ( A__ , A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = 1 @register_to_config def __init__( self :Dict , lowerCAmelCase__ :int = 1_000 , lowerCAmelCase__ :float = 0.0001 , lowerCAmelCase__ :float = 0.02 , lowerCAmelCase__ :str = "linear" , lowerCAmelCase__ :Optional[Union[np.ndarray, List[float]]] = None , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :int = 0 , lowerCAmelCase__ :str = "epsilon" , lowerCAmelCase__ :float = 1.0 , **lowerCAmelCase__ :int , ) -> Union[str, Any]: if kwargs.get('''set_alpha_to_one''' , lowerCAmelCase__ ) is not None: __SCREAMING_SNAKE_CASE : Optional[int] = ( '''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.''' ) deprecate('''set_alpha_to_one''' , '''1.0.0''' , lowerCAmelCase__ , standard_warn=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = kwargs['''set_alpha_to_one'''] if trained_betas is not None: __SCREAMING_SNAKE_CASE : Any = torch.tensor(lowerCAmelCase__ , dtype=torch.floataa ) elif beta_schedule == "linear": __SCREAMING_SNAKE_CASE : str = torch.linspace(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
__SCREAMING_SNAKE_CASE : List[Any] = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCAmelCase__ , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule __SCREAMING_SNAKE_CASE : Optional[Any] = betas_for_alpha_bar(lowerCAmelCase__ ) else: raise NotImplementedError(f'''{beta_schedule} is not implemented for {self.__class__}''' ) __SCREAMING_SNAKE_CASE : Optional[int] = 1.0 - self.betas __SCREAMING_SNAKE_CASE : int = torch.cumprod(self.alphas , dim=0 )
# At every step in inverted ddim, we are looking into the next alphas_cumprod # For the final step, there is no next alphas_cumprod, and the index is out of bounds # `set_alpha_to_zero` decides whether we set this parameter simply to zero # in this case, self.step() just outputs the predicted noise # or whether we use the final alpha of the "non-previous" one. __SCREAMING_SNAKE_CASE : int = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]
# standard deviation of the initial noise distribution __SCREAMING_SNAKE_CASE : Any = 1.0 # settable values __SCREAMING_SNAKE_CASE : Optional[Any] = None __SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(np.arange(0 , lowerCAmelCase__ ).copy().astype(np.intaa ) )
def __magic_name__( self :List[str] , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :Optional[int] = None ) -> torch.FloatTensor: return sample
def __magic_name__( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, torch.device] = None ) -> List[str]: if num_inference_steps > self.config.num_train_timesteps: raise ValueError( f'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.num_train_timesteps`:''' f''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle''' f''' maximal {self.config.num_train_timesteps} timesteps.''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = num_inference_steps __SCREAMING_SNAKE_CASE : Union[str, Any] = self.config.num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __SCREAMING_SNAKE_CASE : Optional[int] = (np.arange(0 , lowerCAmelCase__ ) * step_ratio).round().copy().astype(np.intaa ) __SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(lowerCAmelCase__ ).to(lowerCAmelCase__ ) self.timesteps += self.config.steps_offset
def __magic_name__( self :Tuple , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :int , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :float = 0.0 , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[torch.FloatTensor] = None , lowerCAmelCase__ :bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]: # 1. get previous step value (=t+1) __SCREAMING_SNAKE_CASE : Optional[Any] = timestep + self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas # change original implementation to exactly match noise levels for analogous forward process __SCREAMING_SNAKE_CASE : Any = self.alphas_cumprod[timestep] __SCREAMING_SNAKE_CASE : str = ( self.alphas_cumprod[prev_timestep] if prev_timestep < self.config.num_train_timesteps else self.final_alpha_cumprod ) __SCREAMING_SNAKE_CASE : int = 1 - alpha_prod_t # 3.
compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf if self.config.prediction_type == "epsilon": __SCREAMING_SNAKE_CASE : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 __SCREAMING_SNAKE_CASE : List[Any] = model_output elif self.config.prediction_type == "sample": __SCREAMING_SNAKE_CASE : List[str] = model_output __SCREAMING_SNAKE_CASE : Optional[Any] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 elif self.config.prediction_type == "v_prediction": __SCREAMING_SNAKE_CASE : Dict = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output __SCREAMING_SNAKE_CASE : Tuple = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample else: raise ValueError( f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or''' ''' `v_prediction`''' ) # 4. Clip or threshold "predicted x_0" if self.config.clip_sample: __SCREAMING_SNAKE_CASE : Dict = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __SCREAMING_SNAKE_CASE : Dict = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __SCREAMING_SNAKE_CASE : Any = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if not return_dict: return (prev_sample, pred_original_sample) return DDIMSchedulerOutput(prev_sample=lowerCAmelCase__ , pred_original_sample=lowerCAmelCase__ ) def __len__( self :Optional[int] ) -> List[Any]: return self.config.num_train_timesteps
696
0
"""simple docstring""" from __future__ import annotations import requests __lowerCamelCase = set( 'approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports'.split() ) def a ( __UpperCAmelCase : str , __UpperCAmelCase : int = 1 , __UpperCAmelCase : str = "new" , __UpperCAmelCase : list | None = None ) -> dict: __magic_name__: Optional[Any] = wanted_data or [] if invalid_search_terms := ", ".join(sorted(set(__UpperCAmelCase ) - valid_terms ) ): __magic_name__: Optional[int] = f'Invalid search term: {invalid_search_terms}' raise ValueError(__UpperCAmelCase ) __magic_name__: Optional[int] = requests.get( f'https://reddit.com/r/{subreddit}/{age}.json?limit={limit}' , headers={"""User-agent""": """A random string"""} , ) if response.status_code == 4_2_9: raise requests.HTTPError __magic_name__: Optional[int] = response.json() if not wanted_data: return {id_: data["data"]["children"][id_] for id_ in range(__UpperCAmelCase )} __magic_name__: str = {} for id_ in range(__UpperCAmelCase ): __magic_name__: str = { item: data["""data"""]["""children"""][id_]["""data"""][item] for item in wanted_data } return data_dict if __name__ == "__main__": # If you get Error 429, that means you are rate limited.Try after some time print(get_subreddit_data('learnpython', wanted_data=['title', 'url', 'selftext']))
96
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is kept in the asdict output even when it equals the default
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
696
0
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """Return a sorted-letters signature of the given word."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every word in the list that shares the given word's signature."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath('words.txt').read_text(encoding='utf-8')
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open('anagrams.txt', 'w') as file:
        file.write('all_anagrams = \n ')
        file.write(pprint.pformat(all_anagrams))
97
def solution(max_perimeter: int = 10**9) -> int:
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"""{solution() = }""")
696
0
import json
import os
import tempfile

import transformers

import datasets
from utils import generate_example_dataset, get_duration

SPEED_TEST_N_EXAMPLES = 500_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))


# Note: `map` and `filter` intentionally shadow the builtins; each wrapper just
# times the corresponding datasets.Dataset method.
@get_duration
def map(dataset: datasets.Dataset, **kwargs):
    _ = dataset.map(**kwargs)


@get_duration
def filter(dataset: datasets.Dataset, **kwargs):
    _ = dataset.filter(**kwargs)


def benchmark_map_filter():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"text": datasets.Value("string"), "numbers": datasets.Value("float32")})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"), features, num_examples=SPEED_TEST_N_EXAMPLES
        )
        tokenizer = transformers.AutoTokenizer.from_pretrained("bert-base-cased", use_fast=True)

        def tokenize(examples):
            return tokenizer(examples["text"])

        times["map identity"] = map(dataset)
        times["map identity batched"] = map(dataset, batched=True)
        times["map no-op batched"] = map(dataset, function=lambda x: None, batched=True)

        with dataset.formatted_as(type="numpy"):
            times["map no-op batched numpy"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="pandas"):
            times["map no-op batched pandas"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="torch", columns="numbers"):
            times["map no-op batched pytorch"] = map(dataset, function=lambda x: None, batched=True)
        with dataset.formatted_as(type="tensorflow", columns="numbers"):
            times["map no-op batched tensorflow"] = map(dataset, function=lambda x: None, batched=True)

        times["map fast-tokenizer batched"] = map(dataset, function=tokenize, batched=True)
        times["filter"] = filter(dataset)

        # Activate later when tokenizer supports batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
98
def abbr(a: str, b: str) -> bool:
    """
    Decide whether string `a` can be turned into string `b` by upper-casing
    some of its lowercase letters and deleting the remaining lowercase ones.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    """
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
696
0
from collections.abc import Generator


def fibonacci_generator() -> Generator[int, None, None]:
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1_000) -> int:
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
99
from scipy.stats import pearsonr

import datasets

_DESCRIPTION = '\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n    predictions (`list` of `int`): Predicted class labels, as returned by a model.\n    references (`list` of `int`): Ground truth labels.\n    return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n    pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n    p-value (`float`): P-value, which roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n    Example 1-A simple example using only predictions and references.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n\n    Example 2-The same as Example 1, but that also returns the `p-value`.\n        >>> pearsonr_metric = datasets.load_metric("pearsonr")\n        >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n        >>> print(sorted(list(results.keys())))\n        [\'p-value\', \'pearsonr\']\n        >>> print(round(results[\'pearsonr\'], 2))\n        -0.74\n        >>> print(round(results[\'p-value\'], 2))\n        0.15\n'
_CITATION = '\n@article{2020SciPy-NMeth,\nauthor  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n          Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n          Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n          Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n          Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n          Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n          Kern, Robert and Larson, Eric and Carey, C J and\n          Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n          {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n          Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n          Harris, Charles R. and Archibald, Anne M. and\n          Ribeiro, Antonio H.
and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _lowercase ( datasets.Metric ): '''simple docstring''' def __magic_name__( self :Optional[int] ) -> Optional[int]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float''' ), '''references''': datasets.Value('''float''' ), } ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def __magic_name__( self :Tuple , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple=False ) -> int: if return_pvalue: __SCREAMING_SNAKE_CASE : int = pearsonr(lowerCAmelCase__ , lowerCAmelCase__ ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(lowerCAmelCase__ , lowerCAmelCase__ )[0] )}
696
0
import unittest from transformers import AutoTokenizer, FalconConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FalconForCausalLM, FalconForQuestionAnswering, FalconForSequenceClassification, FalconForTokenClassification, FalconModel, ) class __snake_case : '''simple docstring''' def __init__( self , A_ , A_=3 , A_=7 , A_=True , A_=True , A_=False , A_=True , A_=99 , A_=32 , A_=5 , A_=4 , A_=37 , A_="gelu" , A_=0.1 , A_=0.1 , A_=5_12 , A_=16 , A_=2 , A_=0.02 , A_=3 , A_=4 , A_=None , ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = parent SCREAMING_SNAKE_CASE__ = batch_size SCREAMING_SNAKE_CASE__ = seq_length SCREAMING_SNAKE_CASE__ = is_training SCREAMING_SNAKE_CASE__ = use_input_mask SCREAMING_SNAKE_CASE__ = use_token_type_ids SCREAMING_SNAKE_CASE__ = use_labels SCREAMING_SNAKE_CASE__ = vocab_size SCREAMING_SNAKE_CASE__ = hidden_size SCREAMING_SNAKE_CASE__ = num_hidden_layers SCREAMING_SNAKE_CASE__ = num_attention_heads SCREAMING_SNAKE_CASE__ = intermediate_size SCREAMING_SNAKE_CASE__ = hidden_act SCREAMING_SNAKE_CASE__ = hidden_dropout_prob SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ = max_position_embeddings SCREAMING_SNAKE_CASE__ = type_vocab_size SCREAMING_SNAKE_CASE__ = type_sequence_label_size SCREAMING_SNAKE_CASE__ = initializer_range SCREAMING_SNAKE_CASE__ = num_labels SCREAMING_SNAKE_CASE__ = num_choices SCREAMING_SNAKE_CASE__ = scope def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) SCREAMING_SNAKE_CASE__ = None if self.use_input_mask: SCREAMING_SNAKE_CASE__ = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None SCREAMING_SNAKE_CASE__ = None if self.use_labels: SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size] , self.num_choices ) SCREAMING_SNAKE_CASE__ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase_ ( self ): '''simple docstring''' return FalconConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A_ , initializer_range=self.initializer_range , pad_token_id=1 , new_decoder_architecture=A_ , ) def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = FalconModel(config=A_ ) model.to(A_ ) model.eval() SCREAMING_SNAKE_CASE__ = model(A_ , attention_mask=A_ ) SCREAMING_SNAKE_CASE__ = model(A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) 
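# Reader's note (added, not part of the upstream test file): the helper methods that follow
# exercise Falcon as a decoder fed encoder_hidden_states / encoder_attention_mask, then the
# causal-LM head with labels, and finally cached generation, checking that hidden states computed
# with past_key_values match a full no-cache forward pass over the appended tokens.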
def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = FalconModel(A_ ) model.to(A_ ) model.eval() SCREAMING_SNAKE_CASE__ = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , ) SCREAMING_SNAKE_CASE__ = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , ) SCREAMING_SNAKE_CASE__ = model(A_ , attention_mask=A_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = FalconForCausalLM(config=A_ ) model.to(A_ ) model.eval() SCREAMING_SNAKE_CASE__ = model(A_ , attention_mask=A_ , labels=A_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase_ ( self , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , A_ , ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = FalconForCausalLM(config=A_ ) model.to(A_ ) model.eval() # first forward pass SCREAMING_SNAKE_CASE__ = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , use_cache=A_ , ) SCREAMING_SNAKE_CASE__ = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids SCREAMING_SNAKE_CASE__ = ids_tensor((self.batch_size, 3) , config.vocab_size ) SCREAMING_SNAKE_CASE__ = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and SCREAMING_SNAKE_CASE__ = torch.cat([input_ids, next_tokens] , dim=-1 ) SCREAMING_SNAKE_CASE__ = torch.cat([input_mask, next_mask] , dim=-1 ) SCREAMING_SNAKE_CASE__ = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , output_hidden_states=A_ , )['''hidden_states'''][0] SCREAMING_SNAKE_CASE__ = model( A_ , attention_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , past_key_values=A_ , output_hidden_states=A_ , )['''hidden_states'''][0] # select random slice SCREAMING_SNAKE_CASE__ = ids_tensor((1,) , output_from_past.shape[-1] ).item() SCREAMING_SNAKE_CASE__ = output_from_no_past[:, -3:, random_slice_idx].detach() SCREAMING_SNAKE_CASE__ = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A_ , A_ , atol=1E-3 ) ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ( SCREAMING_SNAKE_CASE__ ) , ) = config_and_inputs SCREAMING_SNAKE_CASE__ = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __snake_case ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' lowerCamelCase__ : Dict = ( ( FalconModel, FalconForCausalLM, FalconForSequenceClassification, FalconForTokenClassification, FalconForQuestionAnswering, ) if is_torch_available() else () ) lowerCamelCase__ : Dict = (FalconForCausalLM,) if is_torch_available() else () lowerCamelCase__ : List[Any] = ( { """feature-extraction""": FalconModel, """text-classification""": FalconForSequenceClassification, """text-generation""": 
FalconForCausalLM, """question-answering""": FalconForQuestionAnswering, """token-classification""": FalconForTokenClassification, """zero-shot""": FalconForSequenceClassification, } if is_torch_available() else {} ) lowerCamelCase__ : List[Any] = False lowerCamelCase__ : Tuple = False def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = FalconModelTester(self ) SCREAMING_SNAKE_CASE__ = ConfigTester(self , config_class=A_ , hidden_size=37 ) def lowercase_ ( self ): '''simple docstring''' self.config_tester.run_common_tests() def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A_ ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs() for alibi in [True, False]: SCREAMING_SNAKE_CASE__ = alibi self.model_tester.create_and_check_model(A_ , *A_ ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ = 3 SCREAMING_SNAKE_CASE__ = input_dict['''input_ids'''] SCREAMING_SNAKE_CASE__ = input_ids.ne(1 ).to(A_ ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ = FalconForSequenceClassification(A_ ) model.to(A_ ) model.eval() SCREAMING_SNAKE_CASE__ = model(A_ , attention_mask=A_ , labels=A_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ = 3 SCREAMING_SNAKE_CASE__ = '''single_label_classification''' SCREAMING_SNAKE_CASE__ = input_dict['''input_ids'''] SCREAMING_SNAKE_CASE__ = input_ids.ne(1 ).to(A_ ) SCREAMING_SNAKE_CASE__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size ) SCREAMING_SNAKE_CASE__ = FalconForSequenceClassification(A_ ) model.to(A_ ) model.eval() SCREAMING_SNAKE_CASE__ = model(A_ , attention_mask=A_ , labels=A_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ = input_dict['''input_ids'''] SCREAMING_SNAKE_CASE__ = FalconForCausalLM(A_ ) model.to(A_ ) model.eval() SCREAMING_SNAKE_CASE__ = model(A_ , use_cache=A_ ) SCREAMING_SNAKE_CASE__ = input_ids.shape[0] SCREAMING_SNAKE_CASE__ = model._convert_to_rw_cache(result.past_key_values ) SCREAMING_SNAKE_CASE__ = model._convert_cache_to_standard_format(A_ , A_ ) for layer in range(len(A_ ) ): for tensor_idx in range(2 ): self.assertTrue(rw_cache[layer][tensor_idx].ndim == 3 ) self.assertTrue(result.past_key_values[layer][tensor_idx].ndim == 4 ) self.assertTrue( torch.all(result.past_key_values[layer][tensor_idx] == standard_cache[layer][tensor_idx] ) ) def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ = 3 SCREAMING_SNAKE_CASE__ = '''multi_label_classification''' SCREAMING_SNAKE_CASE__ = input_dict['''input_ids'''] SCREAMING_SNAKE_CASE__ = input_ids.ne(1 ).to(A_ ) SCREAMING_SNAKE_CASE__ = ids_tensor( 
[self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float ) SCREAMING_SNAKE_CASE__ = FalconForSequenceClassification(A_ ) model.to(A_ ) model.eval() SCREAMING_SNAKE_CASE__ = model(A_ , attention_mask=A_ , labels=A_ ) self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) ) def lowercase_ ( self ): '''simple docstring''' for model_class in self.all_generative_model_classes: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common() # If it doesn't support cache, pass the test if not hasattr(A_ , '''use_cache''' ): return SCREAMING_SNAKE_CASE__ = model_class(A_ ).to(A_ ) if "use_cache" not in inputs: SCREAMING_SNAKE_CASE__ = True SCREAMING_SNAKE_CASE__ = model(**A_ ) # If "past_key_values" is not returned, pass the test (e.g. RWKV uses a different cache name and format) if "past_key_values" not in outputs: return SCREAMING_SNAKE_CASE__ = ( getattr(A_ , '''decoder_layers''' , A_ ) or getattr(A_ , '''num_decoder_layers''' , A_ ) or config.num_hidden_layers ) SCREAMING_SNAKE_CASE__ = getattr(A_ , '''num_kv_heads''' , config.num_attention_heads ) SCREAMING_SNAKE_CASE__ = getattr(A_ , '''d_model''' , config.hidden_size ) SCREAMING_SNAKE_CASE__ = embed_dim // num_attention_heads SCREAMING_SNAKE_CASE__ = outputs['''past_key_values'''] self.assertEqual(len(A_ ) , A_ ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = inputs['''input_ids'''].shape for i in range(A_ ): if config.new_decoder_architecture: SCREAMING_SNAKE_CASE__ = config.num_attention_heads elif config.multi_query: SCREAMING_SNAKE_CASE__ = 1 self.assertEqual(len(past_kv[0] ) , 2 ) # K V for the decoder = 2 self.assertEqual( past_kv[i][0].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) self.assertEqual( past_kv[i][1].shape , (batch_size, num_attention_heads, seq_length, per_head_embed_dim) ) @require_torch class __snake_case ( unittest.TestCase ): '''simple docstring''' @slow def lowercase_ ( self ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained('''Rocketknight1/falcon-rw-1b''' ) SCREAMING_SNAKE_CASE__ = FalconForCausalLM.from_pretrained('''Rocketknight1/falcon-rw-1b''' ) model.eval() model.to(A_ ) SCREAMING_SNAKE_CASE__ = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(A_ ) SCREAMING_SNAKE_CASE__ = ( '''My favorite food is pizza. 
I love it so much that I have a pizza party every year for my birthday.''' ) SCREAMING_SNAKE_CASE__ = model.generate(**A_ , do_sample=A_ , max_new_tokens=19 ) SCREAMING_SNAKE_CASE__ = tokenizer.batch_decode(A_ )[0] self.assertEqual(A_ , A_ ) @slow def lowercase_ ( self ): '''simple docstring''' for repo in ["Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b"]: SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(A_ ) SCREAMING_SNAKE_CASE__ = FalconForCausalLM.from_pretrained(A_ ) model.eval() model.to(A_ ) SCREAMING_SNAKE_CASE__ = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(A_ ) # We just test that these run without errors - the models are randomly initialized # and so the actual text outputs will be garbage model.generate(**A_ , do_sample=A_ , max_new_tokens=4 ) model.generate(**A_ , do_sample=A_ , max_new_tokens=4 ) model.generate(**A_ , num_beams=2 , max_new_tokens=4 ) @slow def lowercase_ ( self ): '''simple docstring''' with torch.no_grad(): for repo in [ "Rocketknight1/falcon-rw-1b", "Rocketknight1/tiny-random-falcon-7b", "Rocketknight1/tiny-random-falcon-40b", ]: SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained(A_ ) SCREAMING_SNAKE_CASE__ = FalconForCausalLM.from_pretrained(A_ ) model.eval() model.to(device=A_ ) SCREAMING_SNAKE_CASE__ = tokenizer('''My favorite food is''' , return_tensors='''pt''' ).to(A_ ) # Test results are the same with and without cache SCREAMING_SNAKE_CASE__ = model.generate(**A_ , do_sample=A_ , max_new_tokens=20 , use_cache=A_ ) SCREAMING_SNAKE_CASE__ = model.generate(**A_ , do_sample=A_ , max_new_tokens=20 , use_cache=A_ ) self.assertTrue((outputs_cache - outputs_no_cache).sum().item() == 0 )
100
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mobilebert import MobileBertTokenizer __lowerCAmelCase : List[str] =logging.get_logger(__name__) __lowerCAmelCase : int ={'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} __lowerCAmelCase : int ={ 'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'}, 'tokenizer_file': { 'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json' }, } __lowerCAmelCase : Optional[int] ={'mobilebert-uncased': 5_1_2} __lowerCAmelCase : Union[str, Any] ={} class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ : List[Any] = MobileBertTokenizer def __init__( self :Tuple , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]="[UNK]" , lowerCAmelCase__ :List[Any]="[SEP]" , lowerCAmelCase__ :List[Any]="[PAD]" , lowerCAmelCase__ :List[Any]="[CLS]" , lowerCAmelCase__ :Any="[MASK]" , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Tuple=None , **lowerCAmelCase__ :List[str] , ) -> Optional[Any]: super().__init__( lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , ) __SCREAMING_SNAKE_CASE : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , lowerCAmelCase__ ) != do_lower_case or normalizer_state.get('''strip_accents''' , lowerCAmelCase__ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase__ ) != tokenize_chinese_chars ): __SCREAMING_SNAKE_CASE : int = getattr(lowerCAmelCase__ , normalizer_state.pop('''type''' ) ) __SCREAMING_SNAKE_CASE : Optional[int] = do_lower_case __SCREAMING_SNAKE_CASE : str = strip_accents __SCREAMING_SNAKE_CASE : Dict = tokenize_chinese_chars __SCREAMING_SNAKE_CASE : Union[str, Any] = normalizer_class(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Any = do_lower_case def __magic_name__( self :Optional[int] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any]=None ) -> Tuple: __SCREAMING_SNAKE_CASE : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __magic_name__( self :List[str] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]: __SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id] __SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]: __SCREAMING_SNAKE_CASE : int = self._tokenizer.model.save(lowerCAmelCase__ , 
name=lowerCAmelCase__ ) return tuple(lowerCAmelCase__ )
696
0
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    """simple docstring"""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1_000,
        beta_start: float = 0.00_085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g.
for image-to-image) if len(self._index_counter ) == 0: SCREAMING_SNAKE_CASE_ : Optional[int] = 1 if len(lowerCAmelCase__ ) > 1 else 0 else: SCREAMING_SNAKE_CASE_ : Union[str, Any] = timestep.cpu().item() if torch.is_tensor(lowerCAmelCase__ ) else timestep SCREAMING_SNAKE_CASE_ : Dict = self._index_counter[timestep_int] return indices[pos].item() @property def UpperCamelCase__ ( self ): """simple docstring""" if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = self.index_for_timestep(lowerCAmelCase__ ) if self.state_in_first_order: SCREAMING_SNAKE_CASE_ : Optional[Any] = self.sigmas[step_index] else: SCREAMING_SNAKE_CASE_ : List[Any] = self.sigmas_interpol[step_index] SCREAMING_SNAKE_CASE_ : Any = sample / ((sigma**2 + 1) ** 0.5) return sample def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ = None , lowerCAmelCase__ = None , ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Union[str, Any] = num_inference_steps SCREAMING_SNAKE_CASE_ : Any = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": SCREAMING_SNAKE_CASE_ : List[Any] = np.linspace(0 , num_train_timesteps - 1 , lowerCAmelCase__ , dtype=lowerCAmelCase__ )[::-1].copy() elif self.config.timestep_spacing == "leading": SCREAMING_SNAKE_CASE_ : Tuple = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 SCREAMING_SNAKE_CASE_ : Any = (np.arange(0 , lowerCAmelCase__ ) * step_ratio).round()[::-1].copy().astype(lowerCAmelCase__ ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": SCREAMING_SNAKE_CASE_ : Optional[Any] = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 SCREAMING_SNAKE_CASE_ : Optional[Any] = (np.arange(lowerCAmelCase__ , 0 , -step_ratio )).round().copy().astype(lowerCAmelCase__ ) timesteps -= 1 else: raise ValueError( F'''{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' ) SCREAMING_SNAKE_CASE_ : Optional[Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) SCREAMING_SNAKE_CASE_ : Any = torch.from_numpy(np.log(lowerCAmelCase__ ) ).to(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.interp(lowerCAmelCase__ , np.arange(0 , len(lowerCAmelCase__ ) ) , lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : List[str] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.from_numpy(lowerCAmelCase__ ).to(device=lowerCAmelCase__ ) # interpolate sigmas SCREAMING_SNAKE_CASE_ : Union[str, Any] = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp() SCREAMING_SNAKE_CASE_ : Dict = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] ) SCREAMING_SNAKE_CASE_ : int = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] ) if str(lowerCAmelCase__ ).startswith('mps' ): # mps does not support float64 SCREAMING_SNAKE_CASE_ : List[Any] = torch.from_numpy(lowerCAmelCase__ ).to(lowerCAmelCase__ , dtype=torch.floataa ) else: SCREAMING_SNAKE_CASE_ : str = torch.from_numpy(lowerCAmelCase__ ).to(lowerCAmelCase__ ) # interpolate timesteps SCREAMING_SNAKE_CASE_ : Tuple = self.sigma_to_t(lowerCAmelCase__ ).to(lowerCAmelCase__ , dtype=timesteps.dtype ) SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten() SCREAMING_SNAKE_CASE_ : Tuple = torch.cat([timesteps[:1], interleaved_timesteps] ) SCREAMING_SNAKE_CASE_ : List[str] = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter SCREAMING_SNAKE_CASE_ : List[Any] = defaultdict(lowerCAmelCase__ ) def UpperCamelCase__ ( self , lowerCAmelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = sigma.log() # get distribution SCREAMING_SNAKE_CASE_ : List[str] = log_sigma - self.log_sigmas[:, None] # get sigmas range SCREAMING_SNAKE_CASE_ : Dict = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 ) SCREAMING_SNAKE_CASE_ : List[str] = low_idx + 1 SCREAMING_SNAKE_CASE_ : Dict = self.log_sigmas[low_idx] SCREAMING_SNAKE_CASE_ : Any = self.log_sigmas[high_idx] # interpolate sigmas SCREAMING_SNAKE_CASE_ : Optional[int] = (low - log_sigma) / (low - high) SCREAMING_SNAKE_CASE_ : Any = w.clamp(0 , 1 ) # transform interpolation to time range SCREAMING_SNAKE_CASE_ : Optional[Any] = (1 - w) * low_idx + w * high_idx SCREAMING_SNAKE_CASE_ : List[Any] = t.view(sigma.shape ) return t @property def UpperCamelCase__ ( self ): """simple docstring""" return self.sample is None def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = True , ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = self.index_for_timestep(lowerCAmelCase__ ) # advance index counter by 1 SCREAMING_SNAKE_CASE_ : List[str] = timestep.cpu().item() if torch.is_tensor(lowerCAmelCase__ ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.sigmas[step_index] SCREAMING_SNAKE_CASE_ : Dict = self.sigmas_interpol[step_index + 1] SCREAMING_SNAKE_CASE_ : str = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method SCREAMING_SNAKE_CASE_ : int = self.sigmas[step_index - 1] SCREAMING_SNAKE_CASE_ : Tuple = self.sigmas_interpol[step_index] SCREAMING_SNAKE_CASE_ : int = self.sigmas[step_index] # 
currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API SCREAMING_SNAKE_CASE_ : str = 0 SCREAMING_SNAKE_CASE_ : int = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": SCREAMING_SNAKE_CASE_ : Tuple = sigma_hat if self.state_in_first_order else sigma_interpol SCREAMING_SNAKE_CASE_ : str = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": SCREAMING_SNAKE_CASE_ : List[Any] = sigma_hat if self.state_in_first_order else sigma_interpol SCREAMING_SNAKE_CASE_ : List[str] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError('prediction_type not implemented yet: sample' ) else: raise ValueError( F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order SCREAMING_SNAKE_CASE_ : Optional[Any] = (sample - pred_original_sample) / sigma_hat # 3. delta timestep SCREAMING_SNAKE_CASE_ : Optional[Any] = sigma_interpol - sigma_hat # store for 2nd order step SCREAMING_SNAKE_CASE_ : Any = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order SCREAMING_SNAKE_CASE_ : int = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep SCREAMING_SNAKE_CASE_ : Optional[Any] = sigma_next - sigma_hat SCREAMING_SNAKE_CASE_ : str = self.sample SCREAMING_SNAKE_CASE_ : int = None SCREAMING_SNAKE_CASE_ : List[Any] = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=lowerCAmelCase__ ) def UpperCamelCase__ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[int] = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(lowerCAmelCase__ ): # mps does not support float64 SCREAMING_SNAKE_CASE_ : List[str] = self.timesteps.to(original_samples.device , dtype=torch.floataa ) SCREAMING_SNAKE_CASE_ : str = timesteps.to(original_samples.device , dtype=torch.floataa ) else: SCREAMING_SNAKE_CASE_ : Tuple = self.timesteps.to(original_samples.device ) SCREAMING_SNAKE_CASE_ : List[str] = timesteps.to(original_samples.device ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = [self.index_for_timestep(lowerCAmelCase__ , lowerCAmelCase__ ) for t in timesteps] SCREAMING_SNAKE_CASE_ : Optional[Any] = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): SCREAMING_SNAKE_CASE_ : Dict = sigma.unsqueeze(-1 ) SCREAMING_SNAKE_CASE_ : List[str] = original_samples + noise * sigma return noisy_samples def __len__( self ): """simple docstring""" return self.config.num_train_timesteps
101
import os


def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j] * grid[i - 1][j + 1] * grid[i - 2][j + 2] * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + '''/grid.txt''') as file:
        for line in file:
            grid.append(line.strip('''\n''').split(''' '''))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)


if __name__ == "__main__":
    print(solution())
696
0
"""simple docstring""" from .integrations import ( is_optuna_available, is_ray_available, is_sigopt_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, run_hp_search_sigopt, run_hp_search_wandb, ) from .trainer_utils import ( HPSearchBackend, default_hp_space_optuna, default_hp_space_ray, default_hp_space_sigopt, default_hp_space_wandb, ) from .utils import logging __magic_name__ : int = logging.get_logger(__name__) class lowercase__ : """simple docstring""" __lowerCAmelCase : str __lowerCAmelCase : str = None @staticmethod def _a ( ): '''simple docstring''' raise NotImplementedError def _a ( self , _A , _A , _A , **_A ): '''simple docstring''' raise NotImplementedError def _a ( self , _A ): '''simple docstring''' raise NotImplementedError def _a ( self ): '''simple docstring''' if not self.is_available(): raise RuntimeError( f"""You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.""" ) @classmethod def _a ( cls ): '''simple docstring''' return f"""`pip install {cls.pip_package or cls.name}`""" class lowercase__ ( __SCREAMING_SNAKE_CASE ): """simple docstring""" __lowerCAmelCase : Optional[Any] = """optuna""" @staticmethod def _a ( ): '''simple docstring''' return is_optuna_available() def _a ( self , _A , _A , _A , **_A ): '''simple docstring''' return run_hp_search_optuna(_A , _A , _A , **_A ) def _a ( self , _A ): '''simple docstring''' return default_hp_space_optuna(_A ) class lowercase__ ( __SCREAMING_SNAKE_CASE ): """simple docstring""" __lowerCAmelCase : Any = """ray""" __lowerCAmelCase : Optional[int] = """'ray[tune]'""" @staticmethod def _a ( ): '''simple docstring''' return is_ray_available() def _a ( self , _A , _A , _A , **_A ): '''simple docstring''' return run_hp_search_ray(_A , _A , _A , **_A ) def _a ( self , _A ): '''simple docstring''' return default_hp_space_ray(_A ) class lowercase__ ( __SCREAMING_SNAKE_CASE ): """simple docstring""" __lowerCAmelCase : Tuple = """sigopt""" @staticmethod def _a ( ): '''simple docstring''' return is_sigopt_available() def _a ( self , _A , _A , _A , **_A ): '''simple docstring''' return run_hp_search_sigopt(_A , _A , _A , **_A ) def _a ( self , _A ): '''simple docstring''' return default_hp_space_sigopt(_A ) class lowercase__ ( __SCREAMING_SNAKE_CASE ): """simple docstring""" __lowerCAmelCase : Optional[Any] = """wandb""" @staticmethod def _a ( ): '''simple docstring''' return is_wandb_available() def _a ( self , _A , _A , _A , **_A ): '''simple docstring''' return run_hp_search_wandb(_A , _A , _A , **_A ) def _a ( self , _A ): '''simple docstring''' return default_hp_space_wandb(_A ) __magic_name__ : Dict = { HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend] } def UpperCamelCase (): UpperCamelCase : str = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()] if len(SCREAMING_SNAKE_CASE ) > 0: UpperCamelCase : Dict = available_backends[0].name if len(SCREAMING_SNAKE_CASE ) > 1: logger.info( f"""{len(SCREAMING_SNAKE_CASE )} hyperparameter search backends available. Using {name} as the default.""" ) return name raise RuntimeError( """No hyperparameter search backend available.\n""" + """\n""".join( f""" - To install {backend.name} run {backend.pip_install()}""" for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
102
import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class _lowercase ( A__ ): '''simple docstring''' def __magic_name__( self :List[Any] ) -> Any: __SCREAMING_SNAKE_CASE : List[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''tf_padding''' ) ) self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''depth_multiplier''' ) ) class _lowercase : '''simple docstring''' def __init__( self :List[str] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any]=13 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Optional[Any]=32 , lowerCAmelCase__ :Dict=0.25 , lowerCAmelCase__ :Optional[int]=8 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Union[str, Any]=1_024 , lowerCAmelCase__ :Any=32 , lowerCAmelCase__ :Tuple="relu6" , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Dict=0.02 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :int=True , lowerCAmelCase__ :int=10 , lowerCAmelCase__ :Union[str, Any]=None , ) -> str: __SCREAMING_SNAKE_CASE : Any = parent __SCREAMING_SNAKE_CASE : Dict = batch_size __SCREAMING_SNAKE_CASE : List[Any] = num_channels __SCREAMING_SNAKE_CASE : Union[str, Any] = image_size __SCREAMING_SNAKE_CASE : Optional[int] = depth_multiplier __SCREAMING_SNAKE_CASE : Dict = min_depth __SCREAMING_SNAKE_CASE : List[str] = tf_padding __SCREAMING_SNAKE_CASE : List[Any] = int(last_hidden_size * depth_multiplier ) __SCREAMING_SNAKE_CASE : List[str] = output_stride __SCREAMING_SNAKE_CASE : Any = hidden_act __SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_dropout_prob __SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels __SCREAMING_SNAKE_CASE : Union[str, Any] = is_training __SCREAMING_SNAKE_CASE : Optional[int] = num_labels __SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range __SCREAMING_SNAKE_CASE : Optional[int] = scope def __magic_name__( self :List[str] ) -> int: __SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = None __SCREAMING_SNAKE_CASE : Optional[Any] = None if self.use_labels: __SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.num_labels ) __SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __SCREAMING_SNAKE_CASE : Any = self.get_config() return config, pixel_values, labels, pixel_labels def __magic_name__( self :Union[str, Any] ) -> Optional[Any]: return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def __magic_name__( self :List[Any] , 
lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str] ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Dict = MobileNetVaModel(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def __magic_name__( self :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]: __SCREAMING_SNAKE_CASE : Tuple = self.num_labels __SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaForImageClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __magic_name__( self :List[Any] ) -> Tuple: __SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = config_and_inputs __SCREAMING_SNAKE_CASE : Dict = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _lowercase ( A__ , A__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else () SCREAMING_SNAKE_CASE__ : Optional[Any] = ( {'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ : Any = False SCREAMING_SNAKE_CASE__ : Any = False SCREAMING_SNAKE_CASE__ : str = False SCREAMING_SNAKE_CASE__ : Tuple = False def __magic_name__( self :Any ) -> Dict: __SCREAMING_SNAKE_CASE : List[str] = MobileNetVaModelTester(self ) __SCREAMING_SNAKE_CASE : Optional[int] = MobileNetVaConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ ) def __magic_name__( self :List[Any] ) -> int: self.config_tester.run_common_tests() @unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' ) def __magic_name__( self :Dict ) -> Optional[Any]: pass @unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' ) def __magic_name__( self :List[Any] ) -> List[Any]: pass @unittest.skip(reason='''MobileNetV1 does not output attentions''' ) def __magic_name__( self :Any ) -> Dict: pass def __magic_name__( self :Any ) -> List[Any]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE : Any = model_class(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __SCREAMING_SNAKE_CASE : Union[str, Any] = [*signature.parameters.keys()] __SCREAMING_SNAKE_CASE : List[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowerCAmelCase__ ) def __magic_name__( self :Any ) -> Tuple: __SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def __magic_name__( self :Union[str, Any] ) -> Tuple: def check_hidden_states_output(lowerCAmelCase__ :Union[str, Any] , 
lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] ): __SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): __SCREAMING_SNAKE_CASE : Optional[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) __SCREAMING_SNAKE_CASE : Optional[Any] = outputs.hidden_states __SCREAMING_SNAKE_CASE : Optional[int] = 26 self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE : str = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __SCREAMING_SNAKE_CASE : List[Any] = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def __magic_name__( self :List[Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ ) @slow def __magic_name__( self :List[str] ) -> List[Any]: for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) def _UpperCamelCase ( ): __SCREAMING_SNAKE_CASE : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class _lowercase ( unittest.TestCase ): '''simple docstring''' @cached_property def __magic_name__( self :Optional[int] ) -> Union[str, Any]: return ( MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None ) @slow def __magic_name__( self :Tuple ) -> Optional[int]: __SCREAMING_SNAKE_CASE : List[str] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = self.default_image_processor __SCREAMING_SNAKE_CASE : int = prepare_img() __SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ ) # forward pass with torch.no_grad(): __SCREAMING_SNAKE_CASE : int = model(**lowerCAmelCase__ ) # verify the logits __SCREAMING_SNAKE_CASE : Any = torch.Size((1, 1_001) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(lowerCAmelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
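The shape assertions in the tester above reduce to simple arithmetic on its default hyperparameters; a quick standalone illustration (all values taken from the tester's defaults, variable names ours):

# Expected `last_hidden_state` shape per the MobileNetV1 tester defaults:
# channels scale with the depth multiplier, spatial dims with the output stride.
batch_size, image_size = 13, 32
last_hidden_size, depth_multiplier = 1_024, 0.25
output_stride = 8

expected_shape = (
    batch_size,
    int(last_hidden_size * depth_multiplier),  # 256 channels
    image_size // output_stride,               # 32 // 8 = 4
    image_size // output_stride,
)
print(expected_shape)  # (13, 256, 4, 4)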
696
0
"""simple docstring""" import sys snake_case = ( '''73167176531330624919225119674426574742355349194934''' '''96983520312774506326239578318016984801869478851843''' '''85861560789112949495459501737958331952853208805511''' '''12540698747158523863050715693290963295227443043557''' '''66896648950445244523161731856403098711121722383113''' '''62229893423380308135336276614282806444486645238749''' '''30358907296290491560440772390713810515859307960866''' '''70172427121883998797908792274921901699720888093776''' '''65727333001053367881220235421809751254540594752243''' '''52584907711670556013604839586446706324415722155397''' '''53697817977846174064955149290862569321978468622482''' '''83972241375657056057490261407972968652414535100474''' '''82166370484403199890008895243450658541227588666881''' '''16427171479924442928230863465674813919123162824586''' '''17866458359124566529476545682848912883142607690042''' '''24219022671055626321111109370544217506941658960408''' '''07198403850962455444362981230987879927244284909188''' '''84580156166097919133875499200524063689912560717606''' '''05886116467109405077541002256983155200055935729725''' '''71636269561882670428252483600823257530420752963450''' ) def snake_case ( lowerCAmelCase_ ) -> int: _snake_case = 1 for digit in s: product *= int(lowerCAmelCase_ ) return product def snake_case ( lowerCAmelCase_ = N ) -> int: _snake_case = -sys.maxsize - 1 _snake_case = n[:13] _snake_case = 13 while cur_index < len(lowerCAmelCase_ ) - 13: if int(n[cur_index] ) >= int(substr[0] ): _snake_case = substr[1:] + n[cur_index] cur_index += 1 else: _snake_case = max(lowerCAmelCase_ , str_eval(lowerCAmelCase_ ) ) _snake_case = n[cur_index : cur_index + 13] cur_index += 13 return largest_product if __name__ == "__main__": print(F"{solution() = }")
103
import os from datetime import datetime as dt from github import Github __lowerCAmelCase : List[Any] =[ 'good first issue', 'good second issue', 'good difficult issue', 'enhancement', 'new pipeline/model', 'new scheduler', 'wip', ] def _UpperCamelCase ( ): __SCREAMING_SNAKE_CASE : Tuple = Github(os.environ['''GITHUB_TOKEN'''] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = g.get_repo('''huggingface/diffusers''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = repo.get_issues(state='''open''' ) for issue in open_issues: __SCREAMING_SNAKE_CASE : Optional[int] = sorted(issue.get_comments() , key=lambda lowercase__ : i.created_at , reverse=lowercase__ ) __SCREAMING_SNAKE_CASE : List[Any] = comments[0] if len(lowercase__ ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state='''closed''' ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state='''open''' ) issue.remove_from_labels('''stale''' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( '''This issue has been automatically marked as stale because it has not had ''' '''recent activity. If you think this still needs to be addressed ''' '''please comment on this thread.\n\nPlease note that issues that do not follow the ''' '''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) ''' '''are likely to be ignored.''' ) issue.add_to_labels('''stale''' ) if __name__ == "__main__": main()
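Distilled from the triage loop above, the auto-close decision is a pure predicate over the issue's timestamps; a minimal sketch (helper name ours; the exempt-label check is omitted for brevity):

# Close rule from the loop above: last comment is the bot's, no update for
# more than 7 days, and the issue is at least 30 days old.
from datetime import datetime, timedelta

def should_close(updated_at, created_at, last_comment_by_bot):
    now = datetime.utcnow()
    return (
        last_comment_by_bot
        and (now - updated_at).days > 7
        and (now - created_at).days >= 30
    )

print(should_close(datetime.utcnow() - timedelta(days=10),
                   datetime.utcnow() - timedelta(days=60),
                   last_comment_by_bot=True))  # True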
696
0
"""simple docstring""" import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def _lowerCamelCase ( UpperCAmelCase_ : Union[str, Any], UpperCAmelCase_ : List[str], UpperCAmelCase_ : Optional[int], UpperCAmelCase_ : List[Any], UpperCAmelCase_ : int ) -> Optional[Any]: """simple docstring""" A__ = StableDiffusionPipeline.from_pretrained(UpperCAmelCase_, torch_dtype=torch.floataa ) # load LoRA weight from .safetensors A__ = load_file(UpperCAmelCase_ ) A__ = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: A__ = key.split("." )[0].split(LORA_PREFIX_TEXT_ENCODER + "_" )[-1].split("_" ) A__ = pipeline.text_encoder else: A__ = key.split("." )[0].split(LORA_PREFIX_UNET + "_" )[-1].split("_" ) A__ = pipeline.unet # find the target layer A__ = layer_infos.pop(0 ) while len(UpperCAmelCase_ ) > -1: try: A__ = curr_layer.__getattr__(UpperCAmelCase_ ) if len(UpperCAmelCase_ ) > 0: A__ = layer_infos.pop(0 ) elif len(UpperCAmelCase_ ) == 0: break except Exception: if len(UpperCAmelCase_ ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: A__ = layer_infos.pop(0 ) A__ = [] if "lora_down" in key: pair_keys.append(key.replace("lora_down", "lora_up" ) ) pair_keys.append(UpperCAmelCase_ ) else: pair_keys.append(UpperCAmelCase_ ) pair_keys.append(key.replace("lora_up", "lora_down" ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: A__ = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) A__ = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(UpperCAmelCase_, UpperCAmelCase_ ).unsqueeze(2 ).unsqueeze(3 ) else: A__ = state_dict[pair_keys[0]].to(torch.floataa ) A__ = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(UpperCAmelCase_, UpperCAmelCase_ ) # update visited list for item in pair_keys: visited.append(UpperCAmelCase_ ) return pipeline if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() parser.add_argument( """--base_model_path""", default=None, type=str, required=True, help="""Path to the base model in diffusers format.""" ) parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") parser.add_argument( """--lora_prefix_unet""", default="""lora_unet""", type=str, help="""The prefix of UNet weight in safetensors""" ) parser.add_argument( """--lora_prefix_text_encoder""", default="""lora_te""", type=str, help="""The prefix of text encoder weight in safetensors""", ) parser.add_argument("""--alpha""", default=0.75, type=float, help="""The merging ratio in W = W0 + alpha * deltaW""") parser.add_argument( """--to_safetensors""", action="""store_true""", help="""Whether to store pipeline in safetensors format or not.""" ) parser.add_argument("""--device""", type=str, help="""Device to use (e.g. 
cpu, cuda:0, cuda:1, etc.)""") UpperCamelCase = parser.parse_args() UpperCamelCase = args.base_model_path UpperCamelCase = args.checkpoint_path UpperCamelCase = args.dump_path UpperCamelCase = args.lora_prefix_unet UpperCamelCase = args.lora_prefix_text_encoder UpperCamelCase = args.alpha UpperCamelCase = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) UpperCamelCase = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
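The heart of the script above is the in-place update W <- W + alpha * (up @ down) applied per LoRA pair; a minimal standalone sketch of that step (function name and toy shapes are ours):

# One low-rank pair folded into a base weight, mirroring the torch.mm update
# in the conversion loop above.
import torch

def merge_lora_pair(weight, up, down, alpha=0.75):
    return weight + alpha * torch.mm(up.to(torch.float32), down.to(torch.float32))

w = torch.zeros(16, 16)                            # toy 16x16 linear weight
up, down = torch.randn(16, 4), torch.randn(4, 16)  # rank-4 update
print(merge_lora_pair(w, up, down).shape)          # torch.Size([16, 16])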
104
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Dict =logging.get_logger(__name__) __lowerCAmelCase : Dict ={ 'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json', # See all CANINE models at https://huggingface.co/models?filter=canine } class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = '''canine''' def __init__( self :Any , lowerCAmelCase__ :List[Any]=768 , lowerCAmelCase__ :Any=12 , lowerCAmelCase__ :str=12 , lowerCAmelCase__ :Optional[int]=3_072 , lowerCAmelCase__ :str="gelu" , lowerCAmelCase__ :Union[str, Any]=0.1 , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :int=16_384 , lowerCAmelCase__ :Tuple=16 , lowerCAmelCase__ :List[Any]=0.02 , lowerCAmelCase__ :int=1E-1_2 , lowerCAmelCase__ :int=0 , lowerCAmelCase__ :List[Any]=0xe000 , lowerCAmelCase__ :List[str]=0xe001 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :Union[str, Any]=8 , lowerCAmelCase__ :Optional[int]=16_384 , lowerCAmelCase__ :Any=128 , **lowerCAmelCase__ :Optional[Any] , ) -> Optional[Any]: super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings __SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size __SCREAMING_SNAKE_CASE : str = num_hidden_layers __SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads __SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size __SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act __SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob __SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : Dict = initializer_range __SCREAMING_SNAKE_CASE : int = type_vocab_size __SCREAMING_SNAKE_CASE : List[Any] = layer_norm_eps # Character config: __SCREAMING_SNAKE_CASE : Tuple = downsampling_rate __SCREAMING_SNAKE_CASE : Optional[Any] = upsampling_kernel_size __SCREAMING_SNAKE_CASE : Any = num_hash_functions __SCREAMING_SNAKE_CASE : Optional[int] = num_hash_buckets __SCREAMING_SNAKE_CASE : List[str] = local_transformer_stride
696
0
import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch) # also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml # same for Vicuna-13b from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipImageProcessor, InstructBlipConfig, InstructBlipForConditionalGeneration, InstructBlipProcessor, InstructBlipQFormerConfig, InstructBlipVisionConfig, LlamaConfig, LlamaTokenizerFast, TaConfig, TaTokenizerFast, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def __UpperCAmelCase ( ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = 'https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg' SCREAMING_SNAKE_CASE_ : Optional[Any] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert('RGB' ) return image def __UpperCAmelCase ( lowerCamelCase_ : int ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ : int = [] # fmt: off # vision encoder rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') ) rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') ) rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') ) rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') ) rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') ) rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') ) rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') ) rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') ) rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') ) rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') ) rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) ) rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') ) rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') ) rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') ) rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') ) rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') ) # QFormer rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') ) rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') ) # fmt: on return rename_keys def __UpperCAmelCase ( lowerCamelCase_ : 
Tuple , lowerCamelCase_ : Tuple , lowerCamelCase_ : Dict ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ : List[Any] = dct.pop(lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : List[Any] = val def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : Optional[Any] ) -> Optional[Any]: """simple docstring""" for i in range(config.vision_config.num_hidden_layers ): # read in original q and v biases SCREAMING_SNAKE_CASE_ : str = state_dict.pop(F'visual_encoder.blocks.{i}.attn.q_bias' ) SCREAMING_SNAKE_CASE_ : Dict = state_dict.pop(F'visual_encoder.blocks.{i}.attn.v_bias' ) # next, set bias in the state dict SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.cat((q_bias, torch.zeros_like(lowerCamelCase_ , requires_grad=lowerCamelCase_ ), v_bias) ) SCREAMING_SNAKE_CASE_ : List[str] = qkv_bias def __UpperCAmelCase ( lowerCamelCase_ : Dict ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE_ : Optional[Any] = 3_64 if 'coco' in model_name else 2_24 SCREAMING_SNAKE_CASE_ : Optional[int] = InstructBlipVisionConfig(image_size=lowerCamelCase_ ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "t5-xl" in model_name: SCREAMING_SNAKE_CASE_ : Union[str, Any] = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: SCREAMING_SNAKE_CASE_ : Optional[Any] = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() elif "vicuna-7b" in model_name: SCREAMING_SNAKE_CASE_ : List[Any] = LlamaConfig.from_pretrained('decapoda-research/llama-7b-hf' , vocab_size=3_20_01 ).to_dict() elif "vicuna-13b" in model_name: SCREAMING_SNAKE_CASE_ : Dict = LlamaConfig.from_pretrained('decapoda-research/llama-13b-hf' , vocab_size=3_20_01 ).to_dict() else: raise ValueError('Model name not supported' ) # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1 SCREAMING_SNAKE_CASE_ : Optional[int] = InstructBlipQFormerConfig(vocab_size=3_05_23 ).to_dict() SCREAMING_SNAKE_CASE_ : List[Any] = InstructBlipConfig(vision_config=lowerCamelCase_ , text_config=lowerCamelCase_ , qformer_config=lowerCamelCase_ ) return config, image_size @torch.no_grad() def __UpperCAmelCase ( lowerCamelCase_ : Dict , lowerCamelCase_ : Any=None , lowerCamelCase_ : Any=False ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = AutoTokenizer.from_pretrained('bert-base-uncased' , truncation_side='left' ) qformer_tokenizer.add_special_tokens({'bos_token': '[DEC]'} ) if "t5" in model_name: SCREAMING_SNAKE_CASE_ : Optional[int] = TaTokenizerFast.from_pretrained('google/flan-t5-xl' , truncation_side='left' ) elif "vicuna" in model_name: # the following was used in the original implementation: # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left") # tokenizer.add_special_tokens({"pad_token": "[PAD]"}) # tokenizer.add_special_tokens({"bos_token": "</s>"}) # tokenizer.add_special_tokens({"eos_token": "</s>"}) # tokenizer.add_special_tokens({"unk_token": "</s>"}) SCREAMING_SNAKE_CASE_ : List[Any] = LlamaTokenizerFast.from_pretrained( 'huggyllama/llama-7b' , truncation_side='left' , bos_token='</s>' , unk_token='</s>' ) tokenizer.add_special_tokens({'pad_token': '[PAD]'} ) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = get_blipa_config(lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : int = 
InstructBlipForConditionalGeneration(lowerCamelCase_ ).eval() SCREAMING_SNAKE_CASE_ : str = { 'instructblip-vicuna-7b': ('blip2_vicuna_instruct', 'vicuna7b'), 'instructblip-vicuna-13b': ('blip2_vicuna_instruct', 'vicuna13b'), 'instructblip-flan-t5-xl': ('blip2_t5_instruct', 'flant5xl'), 'instructblip-flan-t5-xxl': ('blip2_t5_instruct', 'flant5xxl'), } SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[int] = model_name_to_original[model_name] # load original model print('Loading original model...' ) SCREAMING_SNAKE_CASE_ : Optional[int] = 'cuda:1' if torch.cuda.is_available() else 'cpu' SCREAMING_SNAKE_CASE_ : Any = 'cuda:2' if torch.cuda.is_available() else 'cpu' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = load_model_and_preprocess( name=lowerCamelCase_ , model_type=lowerCamelCase_ , is_eval=lowerCamelCase_ , device=lowerCamelCase_ ) original_model.eval() print('Done!' ) # update state dict keys SCREAMING_SNAKE_CASE_ : Optional[int] = original_model.state_dict() SCREAMING_SNAKE_CASE_ : Dict = create_rename_keys(lowerCamelCase_ ) for src, dest in rename_keys: rename_key(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): SCREAMING_SNAKE_CASE_ : Optional[int] = state_dict.pop(lowerCamelCase_ ) if key.startswith('Qformer.bert' ): SCREAMING_SNAKE_CASE_ : Tuple = key.replace('Qformer.bert' , 'qformer' ) if "attention.self" in key: SCREAMING_SNAKE_CASE_ : List[Any] = key.replace('self' , 'attention' ) if "llm_proj" in key: SCREAMING_SNAKE_CASE_ : Any = key.replace('llm_proj' , 'language_projection' ) if "t5_proj" in key: SCREAMING_SNAKE_CASE_ : str = key.replace('t5_proj' , 'language_projection' ) if key.startswith('llm_model' ): SCREAMING_SNAKE_CASE_ : Any = key.replace('llm_model' , 'language_model' ) if key.startswith('t5' ): SCREAMING_SNAKE_CASE_ : Union[str, Any] = key.replace('t5' , 'language' ) SCREAMING_SNAKE_CASE_ : Optional[int] = val # read in qv biases read_in_q_v_bias(lowerCamelCase_ , lowerCamelCase_ ) # note: weights get loaded in torch.float32 by default hf_model.load_state_dict(lowerCamelCase_ , strict=lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : str = load_demo_image() SCREAMING_SNAKE_CASE_ : Any = 'What is unusual about this image?' 
# create processor SCREAMING_SNAKE_CASE_ : Dict = BlipImageProcessor( size={'height': image_size, 'width': image_size} , image_mean=lowerCamelCase_ , image_std=lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : Dict = InstructBlipProcessor( image_processor=lowerCamelCase_ , tokenizer=lowerCamelCase_ , qformer_tokenizer=lowerCamelCase_ , ) SCREAMING_SNAKE_CASE_ : List[Any] = processor(images=lowerCamelCase_ , text=lowerCamelCase_ , return_tensors='pt' ).to(lowerCamelCase_ ) # make sure processor creates exact same pixel values SCREAMING_SNAKE_CASE_ : str = vis_processors['eval'](lowerCamelCase_ ).unsqueeze(0 ).to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : List[str] = inputs.pixel_values assert torch.allclose(original_pixel_values.to(pixel_values.device ) , lowerCamelCase_ ) original_model.to(lowerCamelCase_ ) hf_model.to(lowerCamelCase_ ) with torch.no_grad(): if "vicuna" in model_name: SCREAMING_SNAKE_CASE_ : str = original_model({'image': original_pixel_values, 'text_input': [prompt]} ).logits SCREAMING_SNAKE_CASE_ : Tuple = hf_model(**lowerCamelCase_ ).logits else: SCREAMING_SNAKE_CASE_ : Any = original_model( {'image': original_pixel_values, 'text_input': [prompt], 'text_output': ['\n']} ).logits SCREAMING_SNAKE_CASE_ : str = tokenizer('\n' , return_tensors='pt' ).input_ids.to(lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : str = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id , -1_00 ) SCREAMING_SNAKE_CASE_ : Dict = hf_model(**lowerCamelCase_ , labels=lowerCamelCase_ ).logits print('First values of original logits:' , original_logits[0, :3, :3] ) print('First values of HF logits:' , logits[0, :3, :3] ) # assert values assert original_logits.shape == logits.shape SCREAMING_SNAKE_CASE_ : List[Any] = 1E-4 if 'vicuna' in model_name else 1E-5 assert torch.allclose(original_logits.to(logits.device ) , lowerCamelCase_ , atol=lowerCamelCase_ ) print('Looks ok!' ) print('Generating with original model...' ) SCREAMING_SNAKE_CASE_ : List[Any] = original_model.generate({'image': original_pixel_values, 'prompt': prompt} , num_beams=5 ) # important: we need to cast the weights of the HF model to the appropriate type print('Generating with HF model...' ) SCREAMING_SNAKE_CASE_ : Union[str, Any] = hf_model.generate( **lowerCamelCase_ , do_sample=lowerCamelCase_ , num_beams=5 , max_length=2_56 , min_length=1 , top_p=0.9 , repetition_penalty=1.5 , length_penalty=1.0 , temperature=1 , ) if "vicuna" in model_name: # convert output id 0 to 2 (eos_token_id) # TODO add this in the generate method? 
SCREAMING_SNAKE_CASE_ : Optional[int] = 2 print('Original generation:' , lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : List[Any] = processor.batch_decode(lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : Optional[Any] = [text.strip() for text in output_text] print('HF generation:' , lowerCamelCase_ ) if pytorch_dump_folder_path is not None: processor.save_pretrained(lowerCamelCase_ ) hf_model.save_pretrained(lowerCamelCase_ ) if push_to_hub: processor.push_to_hub(F'Salesforce/{model_name}' ) hf_model.push_to_hub(F'Salesforce/{model_name}' ) if __name__ == "__main__": UpperCamelCase__ : int = argparse.ArgumentParser() UpperCamelCase__ : int = [ '''instructblip-vicuna-7b''', '''instructblip-vicuna-13b''', '''instructblip-flan-t5-xl''', '''instructblip-flan-t5-xxl''', ] parser.add_argument( '''--model_name''', default='''instructblip-flan-t5-xl''', choices=choices, type=str, help='''Path to hf config.json of model to convert''', ) parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model and processor to the hub after converting''', ) UpperCamelCase__ : Optional[Any] = parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
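Most of the conversion above is key surgery on a state dict; the renaming step reduces to pop-and-reinsert, sketched here with two keys taken from the rename table earlier in the script (helper name ours):

# Pop each old key and reinsert its value under the new name, as rename_key()
# does for every pair produced by create_rename_keys().
def rename_state_dict_keys(state_dict, rename_pairs):
    for src, dest in rename_pairs:
        state_dict[dest] = state_dict.pop(src)
    return state_dict

sd = {'visual_encoder.cls_token': 't1', 'ln_vision.weight': 't2'}
pairs = [
    ('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding'),
    ('ln_vision.weight', 'vision_model.post_layernorm.weight'),
]
print(sorted(rename_state_dict_keys(sd, pairs)))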
105
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : List[Any] =logging.get_logger(__name__) __lowerCAmelCase : Tuple ={ 'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json', } class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = '''transfo-xl''' SCREAMING_SNAKE_CASE__ : List[str] = ['''mems'''] SCREAMING_SNAKE_CASE__ : List[Any] = { '''n_token''': '''vocab_size''', '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self :str , lowerCAmelCase__ :Optional[int]=267_735 , lowerCAmelCase__ :Optional[int]=[20_000, 40_000, 200_000] , lowerCAmelCase__ :List[Any]=1_024 , lowerCAmelCase__ :List[str]=1_024 , lowerCAmelCase__ :Any=16 , lowerCAmelCase__ :Tuple=64 , lowerCAmelCase__ :Union[str, Any]=4_096 , lowerCAmelCase__ :Dict=4 , lowerCAmelCase__ :Optional[Any]=False , lowerCAmelCase__ :Dict=18 , lowerCAmelCase__ :Union[str, Any]=1_600 , lowerCAmelCase__ :Union[str, Any]=1_000 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Optional[Any]=0 , lowerCAmelCase__ :Union[str, Any]=-1 , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :str=0.0 , lowerCAmelCase__ :int=True , lowerCAmelCase__ :str="normal" , lowerCAmelCase__ :Tuple=0.01 , lowerCAmelCase__ :Union[str, Any]=0.01 , lowerCAmelCase__ :str=0.02 , lowerCAmelCase__ :Optional[Any]=1E-5 , lowerCAmelCase__ :Union[str, Any]=0 , **lowerCAmelCase__ :Optional[Any] , ) -> str: __SCREAMING_SNAKE_CASE : str = vocab_size __SCREAMING_SNAKE_CASE : Tuple = [] self.cutoffs.extend(lowerCAmelCase__ ) if proj_share_all_but_first: __SCREAMING_SNAKE_CASE : List[str] = [False] + [True] * len(self.cutoffs ) else: __SCREAMING_SNAKE_CASE : Tuple = [False] + [False] * len(self.cutoffs ) __SCREAMING_SNAKE_CASE : Union[str, Any] = d_model __SCREAMING_SNAKE_CASE : Union[str, Any] = d_embed __SCREAMING_SNAKE_CASE : Tuple = d_head __SCREAMING_SNAKE_CASE : Dict = d_inner __SCREAMING_SNAKE_CASE : Optional[Any] = div_val __SCREAMING_SNAKE_CASE : Optional[Any] = pre_lnorm __SCREAMING_SNAKE_CASE : List[str] = n_layer __SCREAMING_SNAKE_CASE : int = n_head __SCREAMING_SNAKE_CASE : str = mem_len __SCREAMING_SNAKE_CASE : Union[str, Any] = same_length __SCREAMING_SNAKE_CASE : str = attn_type __SCREAMING_SNAKE_CASE : Dict = clamp_len __SCREAMING_SNAKE_CASE : Tuple = sample_softmax __SCREAMING_SNAKE_CASE : Optional[int] = adaptive __SCREAMING_SNAKE_CASE : int = dropout __SCREAMING_SNAKE_CASE : Optional[Any] = dropatt __SCREAMING_SNAKE_CASE : int = untie_r __SCREAMING_SNAKE_CASE : Optional[int] = init __SCREAMING_SNAKE_CASE : List[str] = init_range __SCREAMING_SNAKE_CASE : Any = proj_init_std __SCREAMING_SNAKE_CASE : List[str] = init_std __SCREAMING_SNAKE_CASE : Tuple = layer_norm_epsilon super().__init__(eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) @property def __magic_name__( self :str ) -> int: # Message copied from Transformer-XL documentation logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def __magic_name__( self :Tuple , lowerCAmelCase__ :int ) -> Dict: # Message copied from Transformer-XL documentation raise NotImplementedError( f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
696
0
import math import tensorflow as tf from packaging import version def lowerCamelCase_ ( lowerCAmelCase__ : Optional[int] ) -> Any: '''simple docstring''' A = tf.convert_to_tensor(lowerCAmelCase__ ) A = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0 ) , x.dtype ) )) return x * cdf def lowerCamelCase_ ( lowerCAmelCase__ : List[str] ) -> List[Any]: '''simple docstring''' A = tf.convert_to_tensor(lowerCAmelCase__ ) A = tf.cast(math.pi , x.dtype ) A = tf.cast(0.044715 , x.dtype ) A = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi ) * (x + coeff * tf.pow(lowerCAmelCase__ , 3 )) )) return x * cdf def lowerCamelCase_ ( lowerCAmelCase__ : Optional[Any] ) -> Optional[int]: '''simple docstring''' A = tf.convert_to_tensor(lowerCAmelCase__ ) return x * tf.tanh(tf.math.softplus(lowerCAmelCase__ ) ) def lowerCamelCase_ ( lowerCAmelCase__ : Union[str, Any] ) -> Optional[int]: '''simple docstring''' A = tf.convert_to_tensor(lowerCAmelCase__ ) A = tf.cast(0.044715 , x.dtype ) A = tf.cast(0.7978845608 , x.dtype ) return 0.5 * x * (1.0 + tf.tanh(x * coeffa * (1.0 + coeffa * x * x) )) def lowerCamelCase_ ( lowerCAmelCase__ : Optional[Any] ) -> List[str]: '''simple docstring''' A = tf.convert_to_tensor(lowerCAmelCase__ ) A = tf.cast(1.702 , x.dtype ) return x * tf.math.sigmoid(coeff * x ) def lowerCamelCase_ ( lowerCAmelCase__ : Optional[Any] ) -> Any: '''simple docstring''' return tf.clip_by_value(_gelu(lowerCAmelCase__ ) , -10 , 10 ) def lowerCamelCase_ ( lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Tuple=-1 ) -> List[str]: '''simple docstring''' A , A = tf.split(lowerCAmelCase__ , 2 , axis=lowerCAmelCase__ ) return a * tf.math.sigmoid(lowerCAmelCase__ ) if version.parse(tf.version.VERSION) >= version.parse('2.4'): def lowerCamelCase_ ( lowerCAmelCase__ : str ) -> Optional[Any]: '''simple docstring''' return tf.keras.activations.gelu(lowerCAmelCase__ , approximate=lowerCAmelCase__ ) __snake_case :List[str] =tf.keras.activations.gelu __snake_case :Optional[Any] =approximate_gelu_wrap else: __snake_case :Any =_gelu __snake_case :str =_gelu_new __snake_case :List[Any] ={ 'gelu': gelu, 'gelu_10': gelu_aa, 'gelu_fast': gelu_fast, 'gelu_new': gelu_new, 'glu': glu, 'mish': mish, 'quick_gelu': quick_gelu, 'relu': tf.keras.activations.relu, 'sigmoid': tf.keras.activations.sigmoid, 'silu': tf.keras.activations.swish, 'swish': tf.keras.activations.swish, 'tanh': tf.keras.activations.tanh, } def lowerCamelCase_ ( lowerCAmelCase__ : List[Any] ) -> str: '''simple docstring''' if activation_string in ACTaFN: return ACTaFN[activation_string] else: raise KeyError(F'''function {activation_string} not found in ACT2FN mapping {list(ACTaFN.keys() )}''' )
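As a quick sanity check on the approximations defined above (plain `math`, independent of TensorFlow): the tanh-based variant tracks the exact erf-based GELU to within about 1e-3 over typical activations:

# Exact (erf) GELU vs. the tanh approximation used by the gelu_new-style
# function above; both formulas are copied from the definitions in this file.
import math

def gelu_exact(x):
    return 0.5 * x * (1.0 + math.erf(x / math.sqrt(2.0)))

def gelu_tanh(x):
    return 0.5 * x * (1.0 + math.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * x ** 3)))

for x in (-2.0, -0.5, 0.0, 0.5, 2.0):
    assert abs(gelu_exact(x) - gelu_tanh(x)) < 1e-3  # they agree closely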
106
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : str =logging.get_logger(__name__) __lowerCAmelCase : Any ={ # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = '''megatron-bert''' def __init__( self :int , lowerCAmelCase__ :int=29_056 , lowerCAmelCase__ :Dict=1_024 , lowerCAmelCase__ :Optional[int]=24 , lowerCAmelCase__ :str=16 , lowerCAmelCase__ :Optional[int]=4_096 , lowerCAmelCase__ :Optional[Any]="gelu" , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :List[str]=512 , lowerCAmelCase__ :Any=2 , lowerCAmelCase__ :int=0.02 , lowerCAmelCase__ :Tuple=1E-1_2 , lowerCAmelCase__ :Tuple=0 , lowerCAmelCase__ :Optional[int]="absolute" , lowerCAmelCase__ :List[str]=True , **lowerCAmelCase__ :Tuple , ) -> Optional[Any]: super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = vocab_size __SCREAMING_SNAKE_CASE : List[str] = hidden_size __SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers __SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads __SCREAMING_SNAKE_CASE : Tuple = hidden_act __SCREAMING_SNAKE_CASE : Any = intermediate_size __SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob __SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings __SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size __SCREAMING_SNAKE_CASE : str = initializer_range __SCREAMING_SNAKE_CASE : Dict = layer_norm_eps __SCREAMING_SNAKE_CASE : Dict = position_embedding_type __SCREAMING_SNAKE_CASE : Union[str, Any] = use_cache
696
0
'''simple docstring''' from itertools import product from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros def _SCREAMING_SNAKE_CASE ( __snake_case : Union[str, Any] , __snake_case : List[Any] ): _A = k_size // 2 _A , _A = mgrid[0 - center : k_size - center, 0 - center : k_size - center] _A = 1 / (2 * pi * sigma) * exp(-(square(__snake_case ) + square(__snake_case )) / (2 * square(__snake_case )) ) return g def _SCREAMING_SNAKE_CASE ( __snake_case : Any , __snake_case : Optional[Any] , __snake_case : Tuple ): _A , _A = image.shape[0], image.shape[1] # dst image height and width _A = height - k_size + 1 _A = width - k_size + 1 # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows _A = zeros((dst_height * dst_width, k_size * k_size) ) _A = 0 for i, j in product(range(__snake_case ) , range(__snake_case ) ): _A = ravel(image[i : i + k_size, j : j + k_size] ) _A = window row += 1 # turn the kernel into shape(k*k, 1) _A = gen_gaussian_kernel(__snake_case , __snake_case ) _A = ravel(__snake_case ) # reshape and get the dst image _A = dot(__snake_case , __snake_case ).reshape(__snake_case , __snake_case ).astype(__snake_case ) return dst if __name__ == "__main__": # read original image _UpperCAmelCase : List[Any] = imread(r'''../image_data/lena.jpg''') # turn image in gray scale value _UpperCAmelCase : Union[str, Any] = cvtColor(img, COLOR_BGR2GRAY) # get values with two different mask size _UpperCAmelCase : Tuple = gaussian_filter(gray, 3, sigma=1) _UpperCAmelCase : int = gaussian_filter(gray, 5, sigma=0.8) # show result images imshow('''gaussian filter with 3x3 mask''', gaussianaxa) imshow('''gaussian filter with 5x5 mask''', gaussianaxa) waitKey()
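The filter above builds its mask from a 2-D Gaussian; a standalone sketch with readable names (the final normalization is our addition -- the snippet leaves the kernel unnormalized):

# k_size x k_size Gaussian kernel as in gen_gaussian_kernel above, plus an
# explicit normalization so the filter preserves overall image brightness.
import numpy as np

def gaussian_kernel(k_size, sigma):
    center = k_size // 2
    y, x = np.mgrid[-center : k_size - center, -center : k_size - center]
    g = np.exp(-(x ** 2 + y ** 2) / (2.0 * sigma ** 2)) / (2.0 * np.pi * sigma ** 2)
    return g / g.sum()

print(gaussian_kernel(3, sigma=1.0).round(3))  # center-weighted 3x3 kernel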
107
import os import sys import unittest __lowerCAmelCase : List[Any] =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path __lowerCAmelCase : Optional[Any] =os.path.join(git_repo_path, 'src', 'transformers') __lowerCAmelCase : Optional[Any] ='\n{0} = None\n' __lowerCAmelCase : Tuple ='\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n' __lowerCAmelCase : Dict ='\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n' class _lowercase ( unittest.TestCase ): '''simple docstring''' def __magic_name__( self :Tuple ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE : str = find_backend(''' _import_structure["models.albert"].append("AlbertTokenizerFast")''' ) self.assertIsNone(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Dict = find_backend(''' if not is_tokenizers_available():''' ) self.assertEqual(lowerCAmelCase__ , '''tokenizers''' ) __SCREAMING_SNAKE_CASE : Dict = find_backend(''' if not is_tensorflow_text_available():''' ) self.assertEqual(lowerCAmelCase__ , '''tensorflow_text''' ) __SCREAMING_SNAKE_CASE : Tuple = find_backend(''' if not (is_sentencepiece_available() and is_tokenizers_available()):''' ) self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tokenizers''' ) __SCREAMING_SNAKE_CASE : Any = find_backend( ''' if not (is_sentencepiece_available() and is_tensorflow_text_available()):''' ) self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tensorflow_text''' ) __SCREAMING_SNAKE_CASE : List[str] = find_backend( ''' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):''' ) self.assertEqual(lowerCAmelCase__ , '''sentencepiece_and_tokenizers_and_vision''' ) def __magic_name__( self :List[str] ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Union[str, Any] = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn('''torch''' , lowerCAmelCase__ ) self.assertIn('''tensorflow_text''' , lowerCAmelCase__ ) self.assertIn('''sentencepiece_and_tokenizers''' , lowerCAmelCase__ ) # Likewise, we can't assert on the exact content of a key self.assertIn('''BertModel''' , objects['''torch'''] ) self.assertIn('''TFBertModel''' , objects['''tf'''] ) self.assertIn('''FlaxBertModel''' , objects['''flax'''] ) self.assertIn('''BertModel''' , objects['''torch'''] ) self.assertIn('''TFBertTokenizer''' , objects['''tensorflow_text'''] ) self.assertIn('''convert_slow_tokenizer''' , objects['''sentencepiece_and_tokenizers'''] ) def __magic_name__( self :Optional[Any] ) -> List[Any]: __SCREAMING_SNAKE_CASE : List[Any] = create_dummy_object('''CONSTANT''' , '''\'torch\'''' ) self.assertEqual(lowerCAmelCase__ , '''\nCONSTANT = None\n''' ) __SCREAMING_SNAKE_CASE : List[str] = create_dummy_object('''function''' , '''\'torch\'''' ) self.assertEqual( lowerCAmelCase__ , '''\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n''' ) __SCREAMING_SNAKE_CASE : int = ''' class FakeClass(metaclass=DummyObject): _backends = \'torch\' def __init__(self, *args, **kwargs): requires_backends(self, \'torch\') ''' __SCREAMING_SNAKE_CASE : Optional[Any] = create_dummy_object('''FakeClass''' , '''\'torch\'''' ) self.assertEqual(lowerCAmelCase__ , 
lowerCAmelCase__ ) def __magic_name__( self :Optional[Any] ) -> Any: __SCREAMING_SNAKE_CASE : str = '''# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, ["torch"]) class FakeClass(metaclass=DummyObject): _backends = ["torch"] def __init__(self, *args, **kwargs): requires_backends(self, ["torch"]) ''' __SCREAMING_SNAKE_CASE : List[Any] = create_dummy_files({'''torch''': ['''CONSTANT''', '''function''', '''FakeClass''']} ) self.assertEqual(dummy_files['''torch'''] , lowerCAmelCase__ )
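The dummies this test exercises all follow one pattern: a placeholder that fails loudly when an optional backend is missing. A simplified sketch of that pattern (the real `DummyObject` and `requires_backends` live in `transformers.utils` and differ in detail):

# Placeholder class that raises on any public attribute access, standing in
# for a backend-gated class when e.g. torch is not installed.
class DummyObject(type):
    def __getattribute__(cls, key):
        if key.startswith('_'):
            return super().__getattribute__(key)
        raise ImportError(f'{cls.__name__} requires the {cls._backends} backend(s)')

class FakeClass(metaclass=DummyObject):
    _backends = ['torch']

try:
    FakeClass.from_pretrained  # any public attribute access fails fast
except ImportError as err:
    print(err)  # FakeClass requires the ['torch'] backend(s)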
696
0
import os try: from .build_directory_md import good_file_paths except ImportError: from build_directory_md import good_file_paths # type: ignore __a: str = list(good_file_paths()) assert filepaths, "good_file_paths() failed!" __a: List[Any] = [file for file in filepaths if file != file.lower()] if upper_files: print(F"{len(upper_files)} files contain uppercase characters:") print('''\n'''.join(upper_files) + '''\n''') __a: str = [file for file in filepaths if ''' ''' in file] if space_files: print(F"{len(space_files)} files contain space characters:") print('''\n'''.join(space_files) + '''\n''') __a: Dict = [file for file in filepaths if '''-''' in file] if hyphen_files: print(F"{len(hyphen_files)} files contain hyphen characters:") print('''\n'''.join(hyphen_files) + '''\n''') __a: Optional[Any] = [file for file in filepaths if os.sep not in file] if nodir_files: print(F"{len(nodir_files)} files are not in a directory:") print('''\n'''.join(nodir_files) + '''\n''') __a: Tuple = len(upper_files + space_files + hyphen_files + nodir_files) if bad_files: import sys sys.exit(bad_files)
108
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    """Compute Gamma(num) as the integral of x**(num - 1) * exp(-x) over [0, inf)."""
    if num <= 0:
        raise ValueError('''math domain error''')
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
696
0
'''simple docstring''' import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=0.9_9_9 , __UpperCAmelCase="cosine" , ) -> int: '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(__UpperCAmelCase ): return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(__UpperCAmelCase ): return math.exp(t * -1_2.0 ) else: raise ValueError(f"""Unsupported alpha_transform_type: {alpha_transform_type}""" ) __SCREAMING_SNAKE_CASE = [] for i in range(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = i / num_diffusion_timesteps __SCREAMING_SNAKE_CASE = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(__UpperCAmelCase ) / alpha_bar_fn(__UpperCAmelCase ) , __UpperCAmelCase ) ) return torch.tensor(__UpperCAmelCase , dtype=torch.floataa ) class __a ( _snake_case, _snake_case ): __UpperCamelCase : Tuple = [e.name for e in KarrasDiffusionSchedulers] __UpperCamelCase : Union[str, Any] = 2 @register_to_config def __init__( self : Union[str, Any] ,lowerCamelCase : int = 1000 ,lowerCamelCase : float = 0.00_085 ,lowerCamelCase : float = 0.012 ,lowerCamelCase : str = "linear" ,lowerCamelCase : Optional[Union[np.ndarray, List[float]]] = None ,lowerCamelCase : str = "epsilon" ,lowerCamelCase : str = "linspace" ,lowerCamelCase : int = 0 ,): '''simple docstring''' if trained_betas is not None: __SCREAMING_SNAKE_CASE = torch.tensor(lowerCamelCase ,dtype=torch.floataa ) elif beta_schedule == "linear": __SCREAMING_SNAKE_CASE = torch.linspace(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. __SCREAMING_SNAKE_CASE = ( torch.linspace(beta_start**0.5 ,beta_end**0.5 ,lowerCamelCase ,dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule __SCREAMING_SNAKE_CASE = betas_for_alpha_bar(lowerCamelCase ) else: raise NotImplementedError(f"""{beta_schedule} is not implemented for {self.__class__}""" ) __SCREAMING_SNAKE_CASE = 1.0 - self.betas __SCREAMING_SNAKE_CASE = torch.cumprod(self.alphas ,dim=0 ) # set all values self.set_timesteps(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ) def UpperCAmelCase__ ( self : int ,lowerCamelCase : str ,lowerCamelCase : int=None ): '''simple docstring''' if schedule_timesteps is None: __SCREAMING_SNAKE_CASE = self.timesteps __SCREAMING_SNAKE_CASE = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g.
for image-to-image) if len(self._index_counter ) == 0: __SCREAMING_SNAKE_CASE = 1 if len(lowerCamelCase ) > 1 else 0 else: __SCREAMING_SNAKE_CASE = timestep.cpu().item() if torch.is_tensor(lowerCamelCase ) else timestep __SCREAMING_SNAKE_CASE = self._index_counter[timestep_int] return indices[pos].item() @property def UpperCAmelCase__ ( self : List[Any] ): '''simple docstring''' if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : torch.FloatTensor ,lowerCamelCase : Union[float, torch.FloatTensor] ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.index_for_timestep(lowerCamelCase ) if self.state_in_first_order: __SCREAMING_SNAKE_CASE = self.sigmas[step_index] else: __SCREAMING_SNAKE_CASE = self.sigmas_interpol[step_index] __SCREAMING_SNAKE_CASE = sample / ((sigma**2 + 1) ** 0.5) return sample def UpperCAmelCase__ ( self : int ,lowerCamelCase : int ,lowerCamelCase : Union[str, torch.device] = None ,lowerCamelCase : Optional[int] = None ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = num_inference_steps __SCREAMING_SNAKE_CASE = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": __SCREAMING_SNAKE_CASE = np.linspace(0 ,num_train_timesteps - 1 ,lowerCamelCase ,dtype=lowerCamelCase )[::-1].copy() elif self.config.timestep_spacing == "leading": __SCREAMING_SNAKE_CASE = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __SCREAMING_SNAKE_CASE = (np.arange(0 ,lowerCamelCase ) * step_ratio).round()[::-1].copy().astype(lowerCamelCase ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": __SCREAMING_SNAKE_CASE = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __SCREAMING_SNAKE_CASE = (np.arange(lowerCamelCase ,0 ,-step_ratio )).round().copy().astype(lowerCamelCase ) timesteps -= 1 else: raise ValueError( f"""{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of 'linspace', 'leading' or 'trailing'.""" ) __SCREAMING_SNAKE_CASE = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) __SCREAMING_SNAKE_CASE = torch.from_numpy(np.log(lowerCamelCase ) ).to(lowerCamelCase ) __SCREAMING_SNAKE_CASE = np.interp(lowerCamelCase ,np.arange(0 ,len(lowerCamelCase ) ) ,lowerCamelCase ) __SCREAMING_SNAKE_CASE = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) __SCREAMING_SNAKE_CASE = torch.from_numpy(lowerCamelCase ).to(device=lowerCamelCase ) # interpolate sigmas __SCREAMING_SNAKE_CASE = sigmas.log().lerp(sigmas.roll(1 ).log() ,0.5 ).exp() __SCREAMING_SNAKE_CASE = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] ) __SCREAMING_SNAKE_CASE = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] ) if str(lowerCamelCase ).startswith("""mps""" ): # mps does not support float64 __SCREAMING_SNAKE_CASE = torch.from_numpy(lowerCamelCase ).to(lowerCamelCase ,dtype=torch.floataa ) else: __SCREAMING_SNAKE_CASE = torch.from_numpy(lowerCamelCase ).to(lowerCamelCase ) # interpolate timesteps __SCREAMING_SNAKE_CASE = self.sigma_to_t(lowerCamelCase ).to(lowerCamelCase ,dtype=timesteps.dtype ) __SCREAMING_SNAKE_CASE = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) ,dim=-1 ).flatten() __SCREAMING_SNAKE_CASE = torch.cat([timesteps[:1], interleaved_timesteps] ) __SCREAMING_SNAKE_CASE = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter __SCREAMING_SNAKE_CASE = defaultdict(lowerCamelCase ) def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : Dict ): '''simple docstring''' __SCREAMING_SNAKE_CASE = sigma.log() # get distribution __SCREAMING_SNAKE_CASE = log_sigma - self.log_sigmas[:, None] # get sigmas range __SCREAMING_SNAKE_CASE = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 ) __SCREAMING_SNAKE_CASE = low_idx + 1 __SCREAMING_SNAKE_CASE = self.log_sigmas[low_idx] __SCREAMING_SNAKE_CASE = self.log_sigmas[high_idx] # interpolate sigmas __SCREAMING_SNAKE_CASE = (low - log_sigma) / (low - high) __SCREAMING_SNAKE_CASE = w.clamp(0 ,1 ) # transform interpolation to time range __SCREAMING_SNAKE_CASE = (1 - w) * low_idx + w * high_idx __SCREAMING_SNAKE_CASE = t.view(sigma.shape ) return t @property def UpperCAmelCase__ ( self : Any ): '''simple docstring''' return self.sample is None def UpperCAmelCase__ ( self : List[str] ,lowerCamelCase : Union[torch.FloatTensor, np.ndarray] ,lowerCamelCase : Union[float, torch.FloatTensor] ,lowerCamelCase : Union[torch.FloatTensor, np.ndarray] ,lowerCamelCase : bool = True ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.index_for_timestep(lowerCamelCase ) # advance index counter by 1 __SCREAMING_SNAKE_CASE = timestep.cpu().item() if torch.is_tensor(lowerCamelCase ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: __SCREAMING_SNAKE_CASE = self.sigmas[step_index] __SCREAMING_SNAKE_CASE = self.sigmas_interpol[step_index + 1] __SCREAMING_SNAKE_CASE = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method __SCREAMING_SNAKE_CASE = self.sigmas[step_index - 1] __SCREAMING_SNAKE_CASE = self.sigmas_interpol[step_index] __SCREAMING_SNAKE_CASE = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. 
# We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API __SCREAMING_SNAKE_CASE = 0 __SCREAMING_SNAKE_CASE = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": __SCREAMING_SNAKE_CASE = sigma_hat if self.state_in_first_order else sigma_interpol __SCREAMING_SNAKE_CASE = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": __SCREAMING_SNAKE_CASE = sigma_hat if self.state_in_first_order else sigma_interpol __SCREAMING_SNAKE_CASE = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError("""prediction_type not implemented yet: sample""" ) else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`""" ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order __SCREAMING_SNAKE_CASE = (sample - pred_original_sample) / sigma_hat # 3. delta timestep __SCREAMING_SNAKE_CASE = sigma_interpol - sigma_hat # store for 2nd order step __SCREAMING_SNAKE_CASE = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order __SCREAMING_SNAKE_CASE = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep __SCREAMING_SNAKE_CASE = sigma_next - sigma_hat __SCREAMING_SNAKE_CASE = self.sample __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=lowerCamelCase ) def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : torch.FloatTensor ,lowerCamelCase : torch.FloatTensor ,lowerCamelCase : torch.FloatTensor ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(lowerCamelCase ): # mps does not support float64 __SCREAMING_SNAKE_CASE = self.timesteps.to(original_samples.device ,dtype=torch.floataa ) __SCREAMING_SNAKE_CASE = timesteps.to(original_samples.device ,dtype=torch.floataa ) else: __SCREAMING_SNAKE_CASE = self.timesteps.to(original_samples.device ) __SCREAMING_SNAKE_CASE = timesteps.to(original_samples.device ) __SCREAMING_SNAKE_CASE = [self.index_for_timestep(lowerCamelCase ,lowerCamelCase ) for t in timesteps] __SCREAMING_SNAKE_CASE = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): __SCREAMING_SNAKE_CASE = sigma.unsqueeze(-1 ) __SCREAMING_SNAKE_CASE = original_samples + noise * sigma return noisy_samples def __len__( self : List[Any] ): '''simple docstring''' return self.config.num_train_timesteps
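For reference, the cosine ('squaredcos_cap_v2') schedule built by betas_for_alpha_bar above boils down to a few lines; a standalone sketch (function name ours):

# beta_i = 1 - alpha_bar((i + 1) / T) / alpha_bar(i / T), capped at max_beta,
# with alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2 as defined above.
import math

def cosine_betas(num_steps, max_beta=0.999):
    def alpha_bar(t):
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2
    return [
        min(1 - alpha_bar((i + 1) / num_steps) / alpha_bar(i / num_steps), max_beta)
        for i in range(num_steps)
    ]

betas = cosine_betas(1_000)
print(betas[0], betas[-1])  # tiny near t=0, approaching max_beta at the end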
109
def _UpperCamelCase ( lowercase__ , lowercase__ , lowercase__ ): if principal <= 0: raise Exception('''Principal borrowed must be > 0''' ) if rate_per_annum < 0: raise Exception('''Rate of interest must be >= 0''' ) if years_to_repay <= 0 or not isinstance(lowercase__ , lowercase__ ): raise Exception('''Years to repay must be an integer > 0''' ) # Yearly rate is divided by 12 to get monthly rate __SCREAMING_SNAKE_CASE : int = rate_per_annum / 12 # Years to repay is multiplied by 12 to get number of payments as payment is monthly __SCREAMING_SNAKE_CASE : Union[str, Any] = years_to_repay * 12 return ( principal * rate_per_month * (1 + rate_per_month) ** number_of_payments / ((1 + rate_per_month) ** number_of_payments - 1) ) if __name__ == "__main__": import doctest doctest.testmod()
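A worked example of the amortization formula above (toy inputs, ours):

# EMI = P * r * (1 + r)**n / ((1 + r)**n - 1), with monthly rate r over n payments.
principal = 25_000.0
rate_per_annum = 0.12      # 12% per year
years_to_repay = 3

r = rate_per_annum / 12    # monthly rate
n = years_to_repay * 12    # number of monthly payments
emi = principal * r * (1 + r) ** n / ((1 + r) ** n - 1)
print(round(emi, 2))       # 830.36 per month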
696
0
"""simple docstring""" import logging import torch from accelerate import Accelerator from arguments import EvaluationArguments from datasets import load_dataset from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed class _lowercase ( A__ ): def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=1024 , UpperCamelCase_=1024 , UpperCamelCase_=3.6 ): __magic_name__ = tokenizer __magic_name__ = tokenizer.bos_token_id __magic_name__ = dataset __magic_name__ = seq_length __magic_name__ = seq_length * chars_per_token * num_of_sequences def __iter__( self ): __magic_name__ = iter(self.dataset ) __magic_name__ = True while more_examples: __magic_name__ = [], 0 while True: if buffer_len >= self.input_characters: break try: buffer.append(next(lowerCAmelCase__ )['''content'''] ) buffer_len += len(buffer[-1] ) except StopIteration: __magic_name__ = False break __magic_name__ = tokenizer(lowerCAmelCase__ , truncation=lowerCAmelCase__ )['''input_ids'''] __magic_name__ = [] for tokenized_input in tokenized_inputs: all_token_ids.extend(tokenized_input + [self.concat_token_id] ) for i in range(0 , len(lowerCAmelCase__ ) , self.seq_length ): __magic_name__ = all_token_ids[i : i + self.seq_length] if len(lowerCAmelCase__ ) == self.seq_length: yield torch.tensor(lowerCAmelCase__ ) def lowercase ( __UpperCamelCase ) -> List[str]: __magic_name__ = {'''streaming''': True} __magic_name__ = load_dataset(args.dataset_name , split='''train''' , **lowercase__ ) __magic_name__ = ConstantLengthDataset(lowercase__ , lowercase__ , seq_length=args.seq_length ) __magic_name__ = DataLoader(lowercase__ , batch_size=args.batch_size ) return eval_dataloader def lowercase ( __UpperCamelCase ) -> Any: model.eval() __magic_name__ = [] for step, batch in enumerate(lowercase__ ): with torch.no_grad(): __magic_name__ = model(lowercase__ , labels=lowercase__ ) __magic_name__ = outputs.loss.repeat(args.batch_size ) losses.append(accelerator.gather(lowercase__ ) ) if args.max_eval_steps > 0 and step >= args.max_eval_steps: break __magic_name__ = torch.mean(torch.cat(lowercase__ ) ) try: __magic_name__ = torch.exp(lowercase__ ) except OverflowError: __magic_name__ = float('''inf''' ) return loss.item(), perplexity.item() # Setup Accelerator __lowerCamelCase = Accelerator() # Parse configuration __lowerCamelCase = HfArgumentParser(EvaluationArguments) __lowerCamelCase = parser.parse_args() set_seed(args.seed) # Logging __lowerCamelCase = logging.getLogger(__name__) logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO ) # Load model and tokenizer __lowerCamelCase = AutoModelForCausalLM.from_pretrained(args.model_ckpt) __lowerCamelCase = AutoTokenizer.from_pretrained(args.model_ckpt) # Load dataset and dataloader __lowerCamelCase = create_dataloader(args) # Prepare everything with our `accelerator`. __lowerCamelCase = accelerator.prepare(model, eval_dataloader) # Evaluate and save the last checkpoint logger.info("Evaluating and saving model after training") __lowerCamelCase = evaluate(args) logger.info(f"""loss/eval: {eval_loss}, perplexity: {perplexity}""")
490
from maths.is_square_free import is_square_free from maths.prime_factors import prime_factors def _UpperCamelCase ( lowercase__ ): __SCREAMING_SNAKE_CASE : Tuple = prime_factors(lowercase__ ) if is_square_free(lowercase__ ): return -1 if len(lowercase__ ) % 2 else 1 return 0 if __name__ == "__main__": import doctest doctest.testmod()
696
0
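A self-contained sketch of the Möbius function the row above computes; it uses plain trial division instead of the repository's `prime_factors`/`is_square_free` helpers, whose behavior is assumed.

def mobius(n: int) -> int:
    # mu(n) = 0 if n has a squared prime factor, otherwise (-1)**(number of prime factors)
    result, p = 1, 2
    while p * p <= n:
        if n % p == 0:
            n //= p
            if n % p == 0:  # squared prime factor -> mu(n) = 0
                return 0
            result = -result
        p += 1
    if n > 1:  # one leftover prime factor
        result = -result
    return result

assert [mobius(k) for k in range(1, 11)] == [1, -1, -1, 0, -1, 1, -1, 0, 0, 1]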
'''simple docstring''' from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def __UpperCAmelCase ( a_: str, a_: List[str], a_: Optional[Any]=1e-1_2 ): _UpperCAmelCase : Tuple = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(lowercase__, axis=1 ), a_min=lowercase__ ) ).T _UpperCAmelCase : List[Any] = jnp.divide(emb_a.T, jnp.clip(jnp.linalg.norm(lowercase__, axis=1 ), a_min=lowercase__ ) ).T return jnp.matmul(lowercase__, norm_emb_a.T ) class A__ ( nn.Module ): """simple docstring""" UpperCamelCase_ : CLIPConfig UpperCamelCase_ : jnp.dtype = jnp.floataa def _lowerCAmelCase ( self : Dict ) -> List[str]: """simple docstring""" _UpperCAmelCase : Any = FlaxCLIPVisionModule(self.config.vision_config ) _UpperCAmelCase : Any = nn.Dense(self.config.projection_dim , use_bias=lowerCAmelCase__ , dtype=self.dtype ) _UpperCAmelCase : List[str] = self.param("concept_embeds" , jax.nn.initializers.ones , (1_7, self.config.projection_dim) ) _UpperCAmelCase : Any = self.param( "special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) ) _UpperCAmelCase : Dict = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (1_7,) ) _UpperCAmelCase : Optional[Any] = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) ) def __call__( self : Optional[Any] , lowerCAmelCase__ : Dict ) -> int: """simple docstring""" _UpperCAmelCase : str = self.vision_model(lowerCAmelCase__ )[1] _UpperCAmelCase : List[str] = self.visual_projection(lowerCAmelCase__ ) _UpperCAmelCase : Optional[Any] = jax_cosine_distance(lowerCAmelCase__ , self.special_care_embeds ) _UpperCAmelCase : Dict = jax_cosine_distance(lowerCAmelCase__ , self.concept_embeds ) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs _UpperCAmelCase : Union[str, Any] = 0.0 _UpperCAmelCase : Tuple = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment _UpperCAmelCase : Dict = jnp.round(lowerCAmelCase__ , 3 ) _UpperCAmelCase : Optional[int] = jnp.any(special_scores > 0 , axis=1 , keepdims=lowerCAmelCase__ ) # Use a lower threshold if an image has any special care concept _UpperCAmelCase : Optional[int] = is_special_care * 0.01 _UpperCAmelCase : Any = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment _UpperCAmelCase : int = jnp.round(lowerCAmelCase__ , 3 ) _UpperCAmelCase : Optional[int] = jnp.any(concept_scores > 0 , axis=1 ) return has_nsfw_concepts class A__ ( A__ ): """simple docstring""" UpperCamelCase_ : List[Any] = CLIPConfig UpperCamelCase_ : Union[str, Any] = '''clip_input''' UpperCamelCase_ : Optional[int] = FlaxStableDiffusionSafetyCheckerModule def __init__( self : List[str] , lowerCAmelCase__ : CLIPConfig , lowerCAmelCase__ : Optional[Tuple] = None , lowerCAmelCase__ : int = 0 , lowerCAmelCase__ : jnp.dtype = jnp.floataa , lowerCAmelCase__ : bool = True , **lowerCAmelCase__ : Optional[Any] , ) -> Optional[Any]: """simple docstring""" if input_shape is None: _UpperCAmelCase : Any = (1, 2_2_4, 2_2_4, 3) _UpperCAmelCase : Dict = self.module_class(config=lowerCAmelCase__ , dtype=lowerCAmelCase__ , **lowerCAmelCase__ ) super().__init__(lowerCAmelCase__ , lowerCAmelCase__ , input_shape=lowerCAmelCase__ , seed=lowerCAmelCase__ , dtype=lowerCAmelCase__ , _do_init=_do_init ) def 
_lowerCAmelCase ( self : Any , lowerCAmelCase__ : jax.random.KeyArray , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : FrozenDict = None ) -> FrozenDict: """simple docstring""" _UpperCAmelCase : List[Any] = jax.random.normal(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : int = jax.random.split(lowerCAmelCase__ ) _UpperCAmelCase : Union[str, Any] = {'''params''': params_rng, '''dropout''': dropout_rng} _UpperCAmelCase : List[Any] = self.module.init(lowerCAmelCase__ , lowerCAmelCase__ )['''params'''] return random_params def __call__( self : Union[str, Any] , lowerCAmelCase__ : int , lowerCAmelCase__ : dict = None , ) -> Optional[Any]: """simple docstring""" _UpperCAmelCase : Union[str, Any] = jnp.transpose(lowerCAmelCase__ , (0, 2, 3, 1) ) return self.module.apply( {"params": params or self.params} , jnp.array(lowerCAmelCase__ , dtype=jnp.floataa ) , rngs={} , )
494
import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPTaConfig from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 __lowerCAmelCase : int ={ 'return_dict': False, 'output_hidden_states': True, 'output_attentions': True, 'torchscript': True, 'torch_dtype': 'float16', 'use_bfloat16': True, 'tf_legacy_loss': True, 'pruned_heads': {'a': 1}, 'tie_word_embeddings': False, 'is_decoder': True, 'cross_attention_hidden_size': 1_2_8, 'add_cross_attention': True, 'tie_encoder_decoder': True, 'max_length': 5_0, 'min_length': 3, 'do_sample': True, 'early_stopping': True, 'num_beams': 3, 'num_beam_groups': 3, 'diversity_penalty': 0.5, 'temperature': 2.0, 'top_k': 1_0, 'top_p': 0.7, 'typical_p': 0.2, 'repetition_penalty': 0.8, 'length_penalty': 0.8, 'no_repeat_ngram_size': 5, 'encoder_no_repeat_ngram_size': 5, 'bad_words_ids': [1, 2, 3], 'num_return_sequences': 3, 'chunk_size_feed_forward': 5, 'output_scores': True, 'return_dict_in_generate': True, 'forced_bos_token_id': 2, 'forced_eos_token_id': 3, 'remove_invalid_values': True, 'architectures': ['BertModel'], 'finetuning_task': 'translation', 'id2label': {0: 'label'}, 'label2id': {'label': '0'}, 'tokenizer_class': 'BertTokenizerFast', 'prefix': 'prefix', 'bos_token_id': 6, 'pad_token_id': 7, 'eos_token_id': 8, 'sep_token_id': 9, 'decoder_start_token_id': 1_0, 'exponential_decay_length_penalty': (5, 1.0_1), 'suppress_tokens': [0, 1], 'begin_suppress_tokens': 2, 'task_specific_params': {'translation': 'some_params'}, 'problem_type': 'regression', } @is_staging_test class _lowercase ( unittest.TestCase ): '''simple docstring''' @classmethod def __magic_name__( cls :Optional[Any] ) -> Any: __SCREAMING_SNAKE_CASE : str = TOKEN HfFolder.save_token(lowerCAmelCase__ ) @classmethod def __magic_name__( cls :List[str] ) -> List[str]: try: delete_repo(token=cls._token , repo_id='''test-config''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-config''' ) except HTTPError: pass def __magic_name__( self :Any ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE : int = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) config.push_to_hub('''test-config''' , use_auth_token=self._token ) __SCREAMING_SNAKE_CASE : Tuple = BertConfig.from_pretrained(f'''{USER}/test-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-config''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCAmelCase__ , repo_id='''test-config''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token ) __SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) def 
__magic_name__( self :int ) -> Optional[Any]: __SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token ) __SCREAMING_SNAKE_CASE : Any = BertConfig.from_pretrained('''valid_org/test-config-org''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-config-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowerCAmelCase__ , repo_id='''valid_org/test-config-org''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token ) __SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''valid_org/test-config-org''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) def __magic_name__( self :Dict ) -> Optional[int]: CustomConfig.register_for_auto_class() __SCREAMING_SNAKE_CASE : Tuple = CustomConfig(attribute=42 ) config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''} ) __SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''' , trust_remote_code=lowerCAmelCase__ ) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''' ) self.assertEqual(new_config.attribute , 42 ) class _lowercase ( unittest.TestCase ): '''simple docstring''' def __magic_name__( self :List[str] ) -> Dict: __SCREAMING_SNAKE_CASE : Optional[Any] = GPTaConfig() # attempt to modify each of int/float/bool/str config records and verify they were updated __SCREAMING_SNAKE_CASE : Optional[Any] = c.n_embd + 1 # int __SCREAMING_SNAKE_CASE : Optional[Any] = c.resid_pdrop + 1.0 # float __SCREAMING_SNAKE_CASE : Dict = not c.scale_attn_weights # bool __SCREAMING_SNAKE_CASE : Optional[int] = c.summary_type + '''foo''' # str c.update_from_string( f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' ) self.assertEqual(lowerCAmelCase__ , c.n_embd , '''mismatch for key: n_embd''' ) self.assertEqual(lowerCAmelCase__ , c.resid_pdrop , '''mismatch for key: resid_pdrop''' ) self.assertEqual(lowerCAmelCase__ , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''' ) self.assertEqual(lowerCAmelCase__ , c.summary_type , '''mismatch for key: summary_type''' ) def __magic_name__( self :Dict ) -> str: __SCREAMING_SNAKE_CASE : Dict = PretrainedConfig() __SCREAMING_SNAKE_CASE : str = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. 
self.assertListEqual( lowerCAmelCase__ , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] ) __SCREAMING_SNAKE_CASE : List[Any] = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCAmelCase__ , lowerCAmelCase__ )] if len(lowerCAmelCase__ ) > 0: raise ValueError( '''The following keys are set with the default values in''' ''' `test_configuration_common.config_common_kwargs` pick another value for them:''' f''' {', '.join(lowerCAmelCase__ )}.''' ) def __magic_name__( self :Union[str, Any] ) -> List[Any]: with self.assertRaises(lowerCAmelCase__ ): # config is in subfolder, the following should not work without specifying the subfolder __SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' ) __SCREAMING_SNAKE_CASE : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' ) self.assertIsNotNone(lowerCAmelCase__ ) def __magic_name__( self :List[Any] ) -> Optional[Any]: # A mock response for an HTTP head request to emulate server down __SCREAMING_SNAKE_CASE : Union[str, Any] = mock.Mock() __SCREAMING_SNAKE_CASE : List[Any] = 500 __SCREAMING_SNAKE_CASE : Union[str, Any] = {} __SCREAMING_SNAKE_CASE : Optional[Any] = HTTPError __SCREAMING_SNAKE_CASE : str = {} # Download this model to make sure it's in the cache. __SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=lowerCAmelCase__ ) as mock_head: __SCREAMING_SNAKE_CASE : Optional[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) # This check we did call the fake head request mock_head.assert_called() def __magic_name__( self :Union[str, Any] ) -> List[Any]: # This test is for deprecated behavior and can be removed in v5 __SCREAMING_SNAKE_CASE : Optional[int] = BertConfig.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' ) def __magic_name__( self :str ) -> List[str]: __SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained('''bert-base-cased''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = ['''config.4.0.0.json'''] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[Any] = 2 json.dump(configuration.to_dict() , open(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , '''w''' ) ) # This should pick the new configuration file as the version of Transformers is > 4.0.0 __SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertEqual(new_configuration.hidden_size , 2 ) # Will need to be adjusted if we reach v42 and this test is still here. 
# Should pick the old configuration file as the version of Transformers is < 4.42.0 __SCREAMING_SNAKE_CASE : List[Any] = ['''config.42.0.0.json'''] __SCREAMING_SNAKE_CASE : Tuple = 768 configuration.save_pretrained(lowerCAmelCase__ ) shutil.move(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , os.path.join(lowerCAmelCase__ , '''config.42.0.0.json''' ) ) __SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertEqual(new_configuration.hidden_size , 768 ) def __magic_name__( self :List[str] ) -> Union[str, Any]: # This repo has two configuration files, one for v4.0.0 and above with a different hidden size. __SCREAMING_SNAKE_CASE : Union[str, Any] = '''hf-internal-testing/test-two-configs''' import transformers as new_transformers __SCREAMING_SNAKE_CASE : int = '''v4.0.0''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = new_transformers.models.auto.AutoConfig.from_pretrained( lowerCAmelCase__ , return_unused_kwargs=lowerCAmelCase__ ) self.assertEqual(new_configuration.hidden_size , 2 ) # This checks `_configuration_file` ia not kept in the kwargs by mistake. self.assertDictEqual(lowerCAmelCase__ , {} ) # Testing an older version by monkey-patching the version in the module it's used. import transformers as old_transformers __SCREAMING_SNAKE_CASE : List[str] = '''v3.0.0''' __SCREAMING_SNAKE_CASE : Any = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertEqual(old_configuration.hidden_size , 768 )
696
0
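A small NumPy sketch of the normalized cosine scoring the Flax safety checker above performs: row-normalize both embedding matrices, then take a matrix product. The shapes and epsilon below are illustrative.

import numpy as np

def cosine_scores(image_embeds: np.ndarray, concept_embeds: np.ndarray, eps: float = 1e-12) -> np.ndarray:
    # Row-normalize each embedding matrix, then cos(a, b) = a_hat @ b_hat.T
    a = image_embeds / np.clip(np.linalg.norm(image_embeds, axis=1, keepdims=True), eps, None)
    b = concept_embeds / np.clip(np.linalg.norm(concept_embeds, axis=1, keepdims=True), eps, None)
    return a @ b.T

scores = cosine_scores(np.random.randn(2, 8), np.random.randn(17, 8))
assert scores.shape == (2, 17) and np.all(np.abs(scores) <= 1 + 1e-6)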
"""simple docstring""" import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class SCREAMING_SNAKE_CASE__ ( A__ ): __lowerCAmelCase : str = '''Speech2TextFeatureExtractor''' __lowerCAmelCase : Tuple = '''Speech2TextTokenizer''' def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: '''simple docstring''' super().__init__(lowerCAmelCase__ , lowerCAmelCase__ ) UpperCAmelCase : int = self.feature_extractor UpperCAmelCase : int = False def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]: '''simple docstring''' if self._in_target_context_manager: return self.current_processor(*lowerCAmelCase__ , **lowerCAmelCase__ ) if "raw_speech" in kwargs: warnings.warn("""Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.""" ) UpperCAmelCase : Optional[int] = kwargs.pop("""raw_speech""" ) else: UpperCAmelCase : List[str] = kwargs.pop("""audio""" , lowerCAmelCase__ ) UpperCAmelCase : List[Any] = kwargs.pop("""sampling_rate""" , lowerCAmelCase__ ) UpperCAmelCase : Tuple = kwargs.pop("""text""" , lowerCAmelCase__ ) if len(lowerCAmelCase__ ) > 0: UpperCAmelCase : List[str] = args[0] UpperCAmelCase : Union[str, Any] = args[1:] if audio is None and text is None: raise ValueError("""You need to specify either an `audio` or `text` input to process.""" ) if audio is not None: UpperCAmelCase : Optional[Any] = self.feature_extractor(lowerCAmelCase__ , *lowerCAmelCase__ , sampling_rate=lowerCAmelCase__ , **lowerCAmelCase__ ) if text is not None: UpperCAmelCase : Any = self.tokenizer(lowerCAmelCase__ , **lowerCAmelCase__ ) if text is None: return inputs elif audio is None: return encodings else: UpperCAmelCase : str = encodings['''input_ids'''] return inputs def SCREAMING_SNAKE_CASE ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Union[str, Any]: '''simple docstring''' return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ ) def SCREAMING_SNAKE_CASE ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> int: '''simple docstring''' return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ ) @contextmanager def SCREAMING_SNAKE_CASE ( self ) -> Union[str, Any]: '''simple docstring''' warnings.warn( """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """ """labels by using the argument `text` of the regular `__call__` method (either in the same call as """ """your audio inputs, or in a separate call.""" ) UpperCAmelCase : int = True UpperCAmelCase : Union[str, Any] = self.tokenizer yield UpperCAmelCase : Dict = self.feature_extractor UpperCAmelCase : Any = False
160
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]


if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
696
0
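A hedged, self-contained sketch of the swap-and-restore idiom behind `as_target_processor` in the processor above; the `Router` class is hypothetical. Unlike the original, this version wraps the yield in try/finally so the primary component is restored even if the body raises.

from contextlib import contextmanager

class Router:
    """Temporarily redirects calls from a primary to a secondary callable."""

    def __init__(self, primary, secondary):
        self.primary, self.secondary = primary, secondary
        self.current = primary

    @contextmanager
    def as_secondary(self):
        self.current = self.secondary
        try:
            yield
        finally:
            self.current = self.primary  # restore even on error

router = Router(str.upper, str.lower)
assert router.current("Hi") == "HI"
with router.as_secondary():
    assert router.current("Hi") == "hi"
assert router.current("Hi") == "HI"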
import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 SCREAMING_SNAKE_CASE__ : Tuple = data_utils.TransfoXLTokenizer SCREAMING_SNAKE_CASE__ : Optional[int] = data_utils.TransfoXLCorpus SCREAMING_SNAKE_CASE__ : str = data_utils SCREAMING_SNAKE_CASE__ : Optional[int] = data_utils def _lowerCamelCase ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ) -> List[str]: '''simple docstring''' if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(lowercase__ , """rb""" ) as fp: UpperCAmelCase__ : List[str] = pickle.load(lowercase__ , encoding="""latin1""" ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) UpperCAmelCase__ : Optional[int] = pytorch_dump_folder_path + '''/''' + VOCAB_FILES_NAMES['''pretrained_vocab_file'''] print(F"Save vocabulary to {pytorch_vocab_dump_path}" ) UpperCAmelCase__ : str = corpus.vocab.__dict__ torch.save(lowercase__ , lowercase__ ) UpperCAmelCase__ : List[Any] = corpus.__dict__ corpus_dict_no_vocab.pop("""vocab""" , lowercase__ ) UpperCAmelCase__ : List[Any] = pytorch_dump_folder_path + '''/''' + CORPUS_NAME print(F"Save dataset to {pytorch_dataset_dump_path}" ) torch.save(lowercase__ , lowercase__ ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model UpperCAmelCase__ : Tuple = os.path.abspath(lowercase__ ) UpperCAmelCase__ : int = os.path.abspath(lowercase__ ) print(F"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}." ) # Initialise PyTorch model if transfo_xl_config_file == "": UpperCAmelCase__ : Any = TransfoXLConfig() else: UpperCAmelCase__ : Tuple = TransfoXLConfig.from_json_file(lowercase__ ) print(F"Building PyTorch model from configuration: {config}" ) UpperCAmelCase__ : Union[str, Any] = TransfoXLLMHeadModel(lowercase__ ) UpperCAmelCase__ : str = load_tf_weights_in_transfo_xl(lowercase__ , lowercase__ , lowercase__ ) # Save pytorch-model UpperCAmelCase__ : Any = os.path.join(lowercase__ , lowercase__ ) UpperCAmelCase__ : Union[str, Any] = os.path.join(lowercase__ , lowercase__ ) print(F"Save PyTorch model to {os.path.abspath(lowercase__ )}" ) torch.save(model.state_dict() , lowercase__ ) print(F"Save configuration file to {os.path.abspath(lowercase__ )}" ) with open(lowercase__ , """w""" , encoding="""utf-8""" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser() parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the folder to store the PyTorch model or dataset/vocab.""", ) parser.add_argument( """--tf_checkpoint_path""", default="""""", type=str, help="""An optional path to a TensorFlow checkpoint path to be converted.""", ) parser.add_argument( """--transfo_xl_config_file""", default="""""", type=str, help=( """An optional config json file corresponding to the pre-trained BERT model. 
\n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--transfo_xl_dataset_file""", default="""""", type=str, help="""An optional dataset file to be converted in a vocabulary.""", ) SCREAMING_SNAKE_CASE__ : str = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
79
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Dict =logging.get_logger(__name__) __lowerCAmelCase : List[Any] ={ 'google/switch-base-8': 'https://huggingface.co/google/switch-base-8/blob/main/config.json', } class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = '''switch_transformers''' SCREAMING_SNAKE_CASE__ : Optional[int] = ['''past_key_values'''] SCREAMING_SNAKE_CASE__ : str = {'''hidden_size''': '''d_model''', '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers'''} def __init__( self :Optional[int] , lowerCAmelCase__ :Union[str, Any]=32_128 , lowerCAmelCase__ :int=768 , lowerCAmelCase__ :Optional[Any]=64 , lowerCAmelCase__ :List[str]=2_048 , lowerCAmelCase__ :Optional[int]=64 , lowerCAmelCase__ :Union[str, Any]=12 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Tuple=12 , lowerCAmelCase__ :Optional[int]=3 , lowerCAmelCase__ :Optional[int]=12 , lowerCAmelCase__ :Optional[Any]=8 , lowerCAmelCase__ :Tuple=False , lowerCAmelCase__ :List[Any]=0.01 , lowerCAmelCase__ :Any="float32" , lowerCAmelCase__ :int=False , lowerCAmelCase__ :int=32 , lowerCAmelCase__ :Optional[Any]=128 , lowerCAmelCase__ :Optional[Any]=0.1 , lowerCAmelCase__ :str=1E-6 , lowerCAmelCase__ :Tuple=0.001 , lowerCAmelCase__ :List[Any]=0.001 , lowerCAmelCase__ :Union[str, Any]=1.0 , lowerCAmelCase__ :Tuple="relu" , lowerCAmelCase__ :Dict=True , lowerCAmelCase__ :Optional[int]=False , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]=0 , lowerCAmelCase__ :Union[str, Any]=1 , **lowerCAmelCase__ :List[str] , ) -> Tuple: __SCREAMING_SNAKE_CASE : Any = vocab_size __SCREAMING_SNAKE_CASE : Union[str, Any] = d_model __SCREAMING_SNAKE_CASE : Optional[int] = d_kv __SCREAMING_SNAKE_CASE : Tuple = d_ff __SCREAMING_SNAKE_CASE : Tuple = num_sparse_encoder_layers __SCREAMING_SNAKE_CASE : List[Any] = num_layers __SCREAMING_SNAKE_CASE : Union[str, Any] = ( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry __SCREAMING_SNAKE_CASE : Optional[Any] = num_sparse_decoder_layers # This tells us, each how many encoder layer we'll have to set a sparse layer. if self.num_sparse_encoder_layers > 0: __SCREAMING_SNAKE_CASE : List[Any] = self.num_layers // self.num_sparse_encoder_layers else: __SCREAMING_SNAKE_CASE : Tuple = self.num_layers # HACK: this will create 0 sparse layers # This tells us, each how many encoder layer we'll have to set a sparse layer. 
if self.num_sparse_decoder_layers > 0: __SCREAMING_SNAKE_CASE : Union[str, Any] = self.num_decoder_layers // self.num_sparse_decoder_layers else: __SCREAMING_SNAKE_CASE : Dict = self.num_decoder_layers # HACK: this will create 0 sparse layers __SCREAMING_SNAKE_CASE : List[Any] = num_heads __SCREAMING_SNAKE_CASE : List[Any] = num_experts __SCREAMING_SNAKE_CASE : Tuple = expert_capacity __SCREAMING_SNAKE_CASE : List[Any] = router_bias __SCREAMING_SNAKE_CASE : Optional[Any] = router_jitter_noise if router_dtype not in ["float32", "float16", "bfloat16"]: raise ValueError(f'''`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}''' ) __SCREAMING_SNAKE_CASE : List[Any] = router_dtype __SCREAMING_SNAKE_CASE : Optional[Any] = router_ignore_padding_tokens __SCREAMING_SNAKE_CASE : int = relative_attention_num_buckets __SCREAMING_SNAKE_CASE : Any = relative_attention_max_distance __SCREAMING_SNAKE_CASE : Union[str, Any] = dropout_rate __SCREAMING_SNAKE_CASE : Dict = layer_norm_epsilon __SCREAMING_SNAKE_CASE : int = initializer_factor __SCREAMING_SNAKE_CASE : List[str] = feed_forward_proj __SCREAMING_SNAKE_CASE : Any = use_cache __SCREAMING_SNAKE_CASE : Union[str, Any] = add_router_probs __SCREAMING_SNAKE_CASE : int = router_z_loss_coef __SCREAMING_SNAKE_CASE : List[str] = router_aux_loss_coef __SCREAMING_SNAKE_CASE : Dict = self.feed_forward_proj.split('''-''' ) __SCREAMING_SNAKE_CASE : Optional[int] = act_info[-1] __SCREAMING_SNAKE_CASE : Optional[Any] = act_info[0] == '''gated''' if len(lowerCAmelCase__ ) > 1 and act_info[0] != "gated" or len(lowerCAmelCase__ ) > 2: raise ValueError( f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.''' '''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. ''' '''\'gated-gelu\' or \'relu\'''' ) # for backwards compatibility if feed_forward_proj == "gated-gelu": __SCREAMING_SNAKE_CASE : List[Any] = '''gelu_new''' super().__init__( pad_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , is_encoder_decoder=lowerCAmelCase__ , **lowerCAmelCase__ , )
696
0
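A standalone sketch of how the SwitchTransformers config above parses `feed_forward_proj` into a gating flag plus an activation name; the function name is illustrative.

def parse_feed_forward_proj(value: str) -> tuple[bool, str]:
    # "gated-gelu" -> (True, "gelu"); "relu" -> (False, "relu")
    parts = value.split("-")
    if (len(parts) > 1 and parts[0] != "gated") or len(parts) > 2:
        raise ValueError(f"invalid feed_forward_proj: {value!r}")
    return parts[0] == "gated", parts[-1]

assert parse_feed_forward_proj("gated-gelu") == (True, "gelu")
assert parse_feed_forward_proj("relu") == (False, "relu")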
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaPriorEmbaEmbPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class snake_case_ ( A__ ,unittest.TestCase ): A_ = KandinskyVaaControlnetImgaImgPipeline A_ = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint'''] A_ = ['''image_embeds''', '''negative_image_embeds''', '''image''', '''hint'''] A_ = [ '''generator''', '''height''', '''width''', '''strength''', '''guidance_scale''', '''num_inference_steps''', '''return_dict''', '''guidance_scale''', '''num_images_per_prompt''', '''output_type''', '''return_dict''', ] A_ = False @property def UpperCAmelCase__ ( self : List[str] )->Optional[Any]: '''simple docstring''' return 32 @property def UpperCAmelCase__ ( self : Any )->Any: '''simple docstring''' return 32 @property def UpperCAmelCase__ ( self : int )->str: '''simple docstring''' return self.time_input_dim @property def UpperCAmelCase__ ( self : Optional[int] )->str: '''simple docstring''' return self.time_input_dim * 4 @property def UpperCAmelCase__ ( self : Dict )->Dict: '''simple docstring''' return 100 @property def UpperCAmelCase__ ( self : Dict )->Dict: '''simple docstring''' torch.manual_seed(0 ) __lowerCAmelCase : Union[str, Any] = { '''in_channels''': 8, # Out channels is double in channels because predicts mean and variance '''out_channels''': 8, '''addition_embed_type''': '''image_hint''', '''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''), '''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''), '''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''', '''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2), '''layers_per_block''': 1, '''encoder_hid_dim''': self.text_embedder_hidden_size, '''encoder_hid_dim_type''': '''image_proj''', '''cross_attention_dim''': self.cross_attention_dim, '''attention_head_dim''': 4, '''resnet_time_scale_shift''': '''scale_shift''', '''class_embed_type''': None, } __lowerCAmelCase : str = UNetaDConditionModel(**lowerCAmelCase__ ) return model @property def UpperCAmelCase__ ( self : int )->Optional[int]: '''simple docstring''' return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def UpperCAmelCase__ ( self : List[Any] )->str: '''simple docstring''' torch.manual_seed(0 ) __lowerCAmelCase : Union[str, Any] = VQModel(**self.dummy_movq_kwargs ) return model def UpperCAmelCase__ ( self : Union[str, Any] )->Dict: '''simple docstring''' __lowerCAmelCase : int = self.dummy_unet __lowerCAmelCase : Any = self.dummy_movq __lowerCAmelCase : List[Any] = { '''num_train_timesteps''': 1000, '''beta_schedule''': '''linear''', '''beta_start''': 0.00_085, '''beta_end''': 0.012, '''clip_sample''': False, 
'''set_alpha_to_one''': False, '''steps_offset''': 0, '''prediction_type''': '''epsilon''', '''thresholding''': False, } __lowerCAmelCase : List[Any] = DDIMScheduler(**lowerCAmelCase__ ) __lowerCAmelCase : List[str] = { '''unet''': unet, '''scheduler''': scheduler, '''movq''': movq, } return components def UpperCAmelCase__ ( self : Tuple , _snake_case : List[Any] , _snake_case : Optional[int]=0 )->Any: '''simple docstring''' __lowerCAmelCase : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ ) __lowerCAmelCase : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( lowerCAmelCase__ ) # create init_image __lowerCAmelCase : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ ) __lowerCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] __lowerCAmelCase : Dict = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert("""RGB""" ).resize((256, 256) ) # create hint __lowerCAmelCase : Optional[int] = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ ) if str(lowerCAmelCase__ ).startswith("""mps""" ): __lowerCAmelCase : int = torch.manual_seed(lowerCAmelCase__ ) else: __lowerCAmelCase : Tuple = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ ) __lowerCAmelCase : Any = { '''image''': init_image, '''image_embeds''': image_embeds, '''negative_image_embeds''': negative_image_embeds, '''hint''': hint, '''generator''': generator, '''height''': 64, '''width''': 64, '''num_inference_steps''': 10, '''guidance_scale''': 7.0, '''strength''': 0.2, '''output_type''': '''np''', } return inputs def UpperCAmelCase__ ( self : Optional[int] )->Any: '''simple docstring''' __lowerCAmelCase : Optional[Any] = '''cpu''' __lowerCAmelCase : Any = self.get_dummy_components() __lowerCAmelCase : Optional[Any] = self.pipeline_class(**lowerCAmelCase__ ) __lowerCAmelCase : Dict = pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __lowerCAmelCase : int = pipe(**self.get_dummy_inputs(lowerCAmelCase__ ) ) __lowerCAmelCase : Dict = output.images __lowerCAmelCase : int = pipe( **self.get_dummy_inputs(lowerCAmelCase__ ) , return_dict=lowerCAmelCase__ , )[0] __lowerCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1] __lowerCAmelCase : Tuple = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __lowerCAmelCase : Optional[int] = np.array( [0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class snake_case_ ( unittest.TestCase ): def UpperCAmelCase__ ( self : Dict )->List[str]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCAmelCase__ ( self : List[Any] )->int: '''simple docstring''' __lowerCAmelCase : Any = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy""" ) __lowerCAmelCase : List[Any] = load_image( 
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) __lowerCAmelCase : Dict = init_image.resize((512, 512) ) __lowerCAmelCase : Optional[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/hint_image_cat.png""" ) __lowerCAmelCase : List[Any] = torch.from_numpy(np.array(lowerCAmelCase__ ) ).float() / 255.0 __lowerCAmelCase : List[str] = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) __lowerCAmelCase : Union[str, Any] = '''A robot, 4k photo''' __lowerCAmelCase : Dict = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(lowerCAmelCase__ ) __lowerCAmelCase : List[str] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-controlnet-depth""" , torch_dtype=torch.floataa ) __lowerCAmelCase : int = pipeline.to(lowerCAmelCase__ ) pipeline.set_progress_bar_config(disable=lowerCAmelCase__ ) __lowerCAmelCase : Any = torch.Generator(device="""cpu""" ).manual_seed(0 ) __lowerCAmelCase : Any = pipe_prior( lowerCAmelCase__ , image=lowerCAmelCase__ , strength=0.85 , generator=lowerCAmelCase__ , negative_prompt="""""" , ).to_tuple() __lowerCAmelCase : Dict = pipeline( image=lowerCAmelCase__ , image_embeds=lowerCAmelCase__ , negative_image_embeds=lowerCAmelCase__ , hint=lowerCAmelCase__ , generator=lowerCAmelCase__ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type="""np""" , ) __lowerCAmelCase : Tuple = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(lowerCAmelCase__ , lowerCAmelCase__ )
504
from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[List[np.ndarray], torch.FloatTensor] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
696
0
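A short sketch of the device-dependent seeding idiom the Kandinsky test above repeats: MPS backends take the global `torch.manual_seed` (which returns the default CPU generator), while other devices get a device-bound `torch.Generator`.

import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # MPS: fall back to the global CPU generator
    return torch.Generator(device=device).manual_seed(seed)

gen = make_generator("cpu", seed=0)
assert isinstance(gen, torch.Generator)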
'''simple docstring''' import os import unittest from tempfile import TemporaryDirectory import torch import torch.nn as nn from accelerate.utils import ( OffloadedWeightsLoader, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, ) class A__ ( nn.Module ): def __init__( self : Optional[Any] ) -> str: '''simple docstring''' super().__init__() _SCREAMING_SNAKE_CASE =nn.Linear(3 , 4 ) _SCREAMING_SNAKE_CASE =nn.BatchNormad(4 ) _SCREAMING_SNAKE_CASE =nn.Linear(4 , 5 ) def A ( self : Optional[int] , _a : Tuple ) -> Any: '''simple docstring''' return self.lineara(self.batchnorm(self.lineara(lowerCAmelCase__ ) ) ) class A__ ( unittest.TestCase ): def A ( self : List[Any] ) -> str: '''simple docstring''' _SCREAMING_SNAKE_CASE =ModelForTest() with TemporaryDirectory() as tmp_dir: offload_state_dict(lowerCAmelCase__ , model.state_dict() ) _SCREAMING_SNAKE_CASE =os.path.join(lowerCAmelCase__ , 'index.json' ) self.assertTrue(os.path.isfile(lowerCAmelCase__ ) ) # TODO: add tests on what is inside the index for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]: _SCREAMING_SNAKE_CASE =os.path.join(lowerCAmelCase__ , f"{key}.dat" ) self.assertTrue(os.path.isfile(lowerCAmelCase__ ) ) # TODO: add tests on the fact weights are properly loaded def A ( self : int ) -> Optional[int]: '''simple docstring''' _SCREAMING_SNAKE_CASE =[torch.floataa, torch.floataa, torch.bfloataa] for dtype in dtypes: _SCREAMING_SNAKE_CASE =torch.randn(2 , 3 , dtype=lowerCAmelCase__ ) with TemporaryDirectory() as tmp_dir: _SCREAMING_SNAKE_CASE =offload_weight(lowerCAmelCase__ , 'weight' , lowerCAmelCase__ , {} ) _SCREAMING_SNAKE_CASE =os.path.join(lowerCAmelCase__ , 'weight.dat' ) self.assertTrue(os.path.isfile(lowerCAmelCase__ ) ) self.assertDictEqual(lowerCAmelCase__ , {'weight': {'shape': [2, 3], 'dtype': str(lowerCAmelCase__ ).split('.' 
)[1]}} ) _SCREAMING_SNAKE_CASE =load_offloaded_weight(lowerCAmelCase__ , index['weight'] ) self.assertTrue(torch.equal(lowerCAmelCase__ , lowerCAmelCase__ ) ) def A ( self : int ) -> Dict: '''simple docstring''' _SCREAMING_SNAKE_CASE =ModelForTest() _SCREAMING_SNAKE_CASE =model.state_dict() _SCREAMING_SNAKE_CASE ={k: v for k, v in state_dict.items() if '''linear2''' not in k} _SCREAMING_SNAKE_CASE ={k: v for k, v in state_dict.items() if '''linear2''' in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(lowerCAmelCase__ , lowerCAmelCase__ ) _SCREAMING_SNAKE_CASE =OffloadedWeightsLoader(state_dict=lowerCAmelCase__ , save_folder=lowerCAmelCase__ ) # Every key is there with the right value self.assertEqual(sorted(lowerCAmelCase__ ) , sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(lowerCAmelCase__ , weight_map[key] ) ) _SCREAMING_SNAKE_CASE ={k: v for k, v in state_dict.items() if '''weight''' in k} _SCREAMING_SNAKE_CASE ={k: v for k, v in state_dict.items() if '''weight''' not in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(lowerCAmelCase__ , lowerCAmelCase__ ) _SCREAMING_SNAKE_CASE =OffloadedWeightsLoader(state_dict=lowerCAmelCase__ , save_folder=lowerCAmelCase__ ) # Every key is there with the right value self.assertEqual(sorted(lowerCAmelCase__ ) , sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(lowerCAmelCase__ , weight_map[key] ) ) with TemporaryDirectory() as tmp_dir: offload_state_dict(lowerCAmelCase__ , lowerCAmelCase__ ) # Duplicates are removed _SCREAMING_SNAKE_CASE =OffloadedWeightsLoader(state_dict=lowerCAmelCase__ , save_folder=lowerCAmelCase__ ) # Every key is there with the right value self.assertEqual(sorted(lowerCAmelCase__ ) , sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(lowerCAmelCase__ , weight_map[key] ) ) def A ( self : str ) -> Any: '''simple docstring''' _SCREAMING_SNAKE_CASE ={'''a.1''': 0, '''a.10''': 1, '''a.2''': 2} _SCREAMING_SNAKE_CASE =extract_submodules_state_dict(lowerCAmelCase__ , ['a.1', 'a.2'] ) self.assertDictEqual(lowerCAmelCase__ , {'a.1': 0, 'a.2': 2} ) _SCREAMING_SNAKE_CASE ={'''a.1.a''': 0, '''a.10.a''': 1, '''a.2.a''': 2} _SCREAMING_SNAKE_CASE =extract_submodules_state_dict(lowerCAmelCase__ , ['a.1', 'a.2'] ) self.assertDictEqual(lowerCAmelCase__ , {'a.1.a': 0, 'a.2.a': 2} )
405
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    base_url = 'https://downloadgram.net/wp-json/wppress/video-downloader/video?url='
    video_url = requests.get(base_url + url).json()[0]['urls'][0]['src']
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input('Enter Video/IGTV url: ').strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"
    with open(file_name, 'wb') as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
696
0
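A minimal round-trip sketch of offloading a weight to disk and reading it back, mirroring what the accelerate test above checks, but using plain NumPy memmaps rather than accelerate's `offload_weight`/`load_offloaded_weight` helpers (shape and dtype must be tracked externally, which is what the index file in the test is for).

import os
import tempfile

import numpy as np

with tempfile.TemporaryDirectory() as tmp_dir:
    path = os.path.join(tmp_dir, "weight.dat")
    weight = np.random.randn(2, 3).astype(np.float32)

    # Write the tensor into a flat binary file via a memory map
    mm = np.memmap(path, dtype=weight.dtype, mode="w+", shape=weight.shape)
    mm[:] = weight
    mm.flush()

    # Reload it lazily from disk
    loaded = np.memmap(path, dtype=np.float32, mode="r", shape=(2, 3))
    assert np.allclose(weight, loaded)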
import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class SCREAMING_SNAKE_CASE ( A__ ): """simple docstring""" def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(lowerCAmelCase__ , 'tf_padding' ) ) self.parent.assertTrue(hasattr(lowerCAmelCase__ , 'depth_multiplier' ) ) class SCREAMING_SNAKE_CASE : """simple docstring""" def __init__( self , __UpperCamelCase , __UpperCamelCase=13 , __UpperCamelCase=3 , __UpperCamelCase=32 , __UpperCamelCase=0.25 , __UpperCamelCase=8 , __UpperCamelCase=True , __UpperCamelCase=10_24 , __UpperCamelCase=32 , __UpperCamelCase="relu6" , __UpperCamelCase=0.1 , __UpperCamelCase=0.02 , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=10 , __UpperCamelCase=None , ): """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = num_channels snake_case_ = image_size snake_case_ = depth_multiplier snake_case_ = min_depth snake_case_ = tf_padding snake_case_ = int(last_hidden_size * depth_multiplier ) snake_case_ = output_stride snake_case_ = hidden_act snake_case_ = classifier_dropout_prob snake_case_ = use_labels snake_case_ = is_training snake_case_ = num_labels snake_case_ = initializer_range snake_case_ = scope def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) snake_case_ = None snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size] , self.num_labels ) snake_case_ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) snake_case_ = self.get_config() return config, pixel_values, labels, pixel_labels def __lowerCAmelCase ( self ): """simple docstring""" return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" snake_case_ = MobileNetVaModel(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case_ = model(lowerCAmelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def __lowerCAmelCase ( self , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): """simple docstring""" snake_case_ = self.num_labels snake_case_ = MobileNetVaForImageClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() snake_case_ = 
model(lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = self.prepare_config_and_inputs() snake_case_ = config_and_inputs snake_case_ = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ): """simple docstring""" __A = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else () __A = ( {'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification} if is_torch_available() else {} ) __A = False __A = False __A = False __A = False def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = MobileNetVaModelTester(self ) snake_case_ = MobileNetVaConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ ) def __lowerCAmelCase ( self ): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='MobileNetV1 does not use inputs_embeds' ) def __lowerCAmelCase ( self ): """simple docstring""" pass @unittest.skip(reason='MobileNetV1 does not support input and output embeddings' ) def __lowerCAmelCase ( self ): """simple docstring""" pass @unittest.skip(reason='MobileNetV1 does not output attentions' ) def __lowerCAmelCase ( self ): """simple docstring""" pass def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = model_class(lowerCAmelCase__ ) snake_case_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ = [*signature.parameters.keys()] snake_case_ = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowerCAmelCase__ ) def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def __lowerCAmelCase ( self ): """simple docstring""" def check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): snake_case_ = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): snake_case_ = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) snake_case_ = outputs.hidden_states snake_case_ = 26 self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ ) @slow def __lowerCAmelCase ( self ): """simple docstring""" for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ = MobileNetVaModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) def a(): '''simple docstring''' snake_case_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class 
SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" @cached_property def __lowerCAmelCase ( self ): """simple docstring""" return ( MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v1_1.0_224' ) if is_vision_available() else None ) @slow def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v1_1.0_224' ).to(lowerCAmelCase__ ) snake_case_ = self.default_image_processor snake_case_ = prepare_img() snake_case_ = image_processor(images=lowerCAmelCase__ , return_tensors='pt' ).to(lowerCAmelCase__ ) # forward pass with torch.no_grad(): snake_case_ = model(**lowerCAmelCase__ ) # verify the logits snake_case_ = torch.Size((1, 10_01) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase__ ) snake_case_ = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(lowerCAmelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
187
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _lowercase ( A__ , A__ , A__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = StableDiffusionInstructPixaPixPipeline SCREAMING_SNAKE_CASE__ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''} SCREAMING_SNAKE_CASE__ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS SCREAMING_SNAKE_CASE__ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS SCREAMING_SNAKE_CASE__ : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS def __magic_name__( self :int ) -> Optional[int]: torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) __SCREAMING_SNAKE_CASE : str = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : Any = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : Dict = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) __SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __magic_name__( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any]=0 ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0] __SCREAMING_SNAKE_CASE : List[Any] = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert('''RGB''' ) if str(lowerCAmelCase__ ).startswith('''mps''' ): __SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(lowerCAmelCase__ ) else: __SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, 
'''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''image_guidance_scale''': 1, '''output_type''': '''numpy''', } return inputs def __magic_name__( self :Union[str, Any] ) -> str: __SCREAMING_SNAKE_CASE : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE : Any = self.get_dummy_components() __SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = sd_pipe(**lowerCAmelCase__ ).images __SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE : int = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __magic_name__( self :Tuple ) -> Optional[int]: __SCREAMING_SNAKE_CASE : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components() __SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = '''french fries''' __SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(**lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = output.images __SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __magic_name__( self :Dict ) -> Dict: __SCREAMING_SNAKE_CASE : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_components() __SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(lowerCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Dict = [inputs['''prompt''']] * 2 __SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(inputs['''image'''] ).astype(np.floataa ) / 255.0 __SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowerCAmelCase__ ).unsqueeze(0 ).to(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Union[str, Any] = image / 2 + 0.5 __SCREAMING_SNAKE_CASE : Optional[Any] = image.permute(0 , 3 , 1 , 2 ) __SCREAMING_SNAKE_CASE : Any = image.repeat(2 , 1 , 1 , 1 ) __SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(**lowerCAmelCase__ ).images __SCREAMING_SNAKE_CASE : Dict = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) __SCREAMING_SNAKE_CASE : Tuple = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __magic_name__( self :Union[str, Any] ) -> Dict: __SCREAMING_SNAKE_CASE : Tuple = '''cpu''' # ensure determinism for the device-dependent 
torch.Generator __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components() __SCREAMING_SNAKE_CASE : Union[str, Any] = EulerAncestralDiscreteScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' ) __SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(lowerCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Dict = sd_pipe(**lowerCAmelCase__ ).images __SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE : List[str] = [round(lowerCAmelCase__ , 4 ) for x in image_slice.flatten().tolist()] print(''','''.join([str(lowerCAmelCase__ ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE : List[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __magic_name__( self :Tuple ) -> Optional[int]: super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def __magic_name__( self :str ) -> List[Any]: __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components() __SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : int = VaeImageProcessor(do_resize=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : str = pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Tuple = pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' ) )[0] __SCREAMING_SNAKE_CASE : Union[str, Any] = components['''vae'''] __SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' ) for image_param in self.image_latents_params: if image_param in inputs.keys(): __SCREAMING_SNAKE_CASE : Optional[int] = vae.encode(inputs[image_param] ).latent_dist.mode() __SCREAMING_SNAKE_CASE : Dict = pipe(**lowerCAmelCase__ )[0] __SCREAMING_SNAKE_CASE : List[Any] = np.abs(out - out_latents_inputs ).max() self.assertLess(lowerCAmelCase__ , 1E-4 , '''passing latents as image input generate different result from passing image''' ) @slow @require_torch_gpu class _lowercase ( unittest.TestCase ): '''simple docstring''' def __magic_name__( self :Union[str, Any] ) -> str: super().tearDown() gc.collect() torch.cuda.empty_cache() def __magic_name__( self :int , lowerCAmelCase__ :Dict=0 ) -> Optional[int]: __SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : int = load_image( '''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' ) __SCREAMING_SNAKE_CASE : Dict = { '''prompt''': '''turn him into a cyborg''', '''image''': image, '''generator''': generator, '''num_inference_steps''': 3, '''guidance_scale''': 7.5, '''image_guidance_scale''': 1.0, '''output_type''': '''numpy''', } return inputs def __magic_name__( self :Dict ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing() __SCREAMING_SNAKE_CASE : Dict = self.get_inputs() __SCREAMING_SNAKE_CASE : str = pipe(**lowerCAmelCase__ 
).images __SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def __magic_name__( self :Any ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing() __SCREAMING_SNAKE_CASE : Any = self.get_inputs() __SCREAMING_SNAKE_CASE : int = pipe(**lowerCAmelCase__ ).images __SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE : Dict = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def __magic_name__( self :Optional[int] ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Any = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing() __SCREAMING_SNAKE_CASE : str = self.get_inputs() __SCREAMING_SNAKE_CASE : Optional[int] = pipe(**lowerCAmelCase__ ).images __SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) __SCREAMING_SNAKE_CASE : List[Any] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def __magic_name__( self :Dict ) -> Tuple: __SCREAMING_SNAKE_CASE : List[Any] = 0 def callback_fn(lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :torch.FloatTensor ) -> None: __SCREAMING_SNAKE_CASE : Dict = True nonlocal number_of_steps number_of_steps += 1 if step == 1: __SCREAMING_SNAKE_CASE : Any = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) __SCREAMING_SNAKE_CASE : Tuple = latents[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE : str = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 elif step == 2: __SCREAMING_SNAKE_CASE : Union[str, Any] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) __SCREAMING_SNAKE_CASE : List[str] = latents[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE : str = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 __SCREAMING_SNAKE_CASE : List[str] = False __SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa ) __SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing() __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs() pipe(**lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def 
__magic_name__( self :List[str] ) -> Union[str, Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() __SCREAMING_SNAKE_CASE : int = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa ) __SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() __SCREAMING_SNAKE_CASE : Dict = self.get_inputs() __SCREAMING_SNAKE_CASE : List[Any] = pipe(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Tuple = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def __magic_name__( self :int ) -> Tuple: __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 __SCREAMING_SNAKE_CASE : int = inputs['''image'''].resize((504, 504) ) __SCREAMING_SNAKE_CASE : Optional[int] = '''timbrooks/instruct-pix2pix''' __SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained( lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , ) pipe.to(lowerCAmelCase__ ) pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) pipe.enable_attention_slicing() __SCREAMING_SNAKE_CASE : Any = pipe(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = output.images[0] __SCREAMING_SNAKE_CASE : str = image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) __SCREAMING_SNAKE_CASE : str = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
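A minimal end-to-end usage sketch for the pipeline exercised above, added for illustration. It assumes the published diffusers API, where the class is exported as StableDiffusionInstructPix2PixPipeline (this dump mangles it to StableDiffusionInstructPixaPixPipeline); the checkpoint and test image are the ones the slow tests load.

import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

# Same checkpoint as the slow tests; fp16 keeps GPU memory modest.
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None
)
pipe = pipe.to("cuda")

image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
)
edited = pipe(
    "turn him into a cyborg",  # the edit instruction used in get_inputs() above
    image=image,
    num_inference_steps=20,
    guidance_scale=7.5,
    image_guidance_scale=1.0,
).images[0]
edited.save("cyborg.png")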
import json import multiprocessing import os import re from collections import defaultdict import torch from accelerate import Accelerator from accelerate.utils import set_seed from arguments import HumanEvalArguments from datasets import load_dataset, load_metric from torch.utils.data import IterableDataset from torch.utils.data.dataloader import DataLoader from tqdm import tqdm import transformers from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, StoppingCriteria, StoppingCriteriaList _lowerCamelCase : List[str] = ['\nclass', '\ndef', '\n#', '\n@', '\nprint', '\nif'] class snake_case__ ( A__ ): '''simple docstring''' def __init__( self : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : Tuple=None , lowerCAmelCase_ : Dict=1 ) -> str: UpperCAmelCase_ = tokenizer UpperCAmelCase_ = dataset UpperCAmelCase_ = len(lowerCAmelCase__ ) if n_tasks is None else n_tasks UpperCAmelCase_ = n_copies def __iter__( self : Dict ) -> Tuple: UpperCAmelCase_ = [] for task in range(self.n_tasks ): # without strip, the model generate commented codes ... prompts.append(self.tokenizer.eos_token + self.dataset[task]['''prompt'''].strip() ) UpperCAmelCase_ = self.tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors='''pt''' ) for task in range(self.n_tasks ): for _ in range(self.n_copies ): yield { "ids": outputs.input_ids[task], "task_id": task, "input_len": outputs.attention_mask[task].sum(), } class snake_case__ ( A__ ): '''simple docstring''' def __init__( self : List[Any] , lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : int ) -> int: UpperCAmelCase_ = start_length UpperCAmelCase_ = eof_strings UpperCAmelCase_ = tokenizer def __call__( self : Optional[Any] , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] , **lowerCAmelCase_ : Union[str, Any] ) -> List[Any]: UpperCAmelCase_ = self.tokenizer.batch_decode(input_ids[:, self.start_length :] ) UpperCAmelCase_ = [] for decoded_generation in decoded_generations: done.append(any(stop_string in decoded_generation for stop_string in self.eof_strings ) ) return all(lowerCAmelCase__ ) def _lowerCAmelCase ( __magic_name__ :str ): UpperCAmelCase_ = re.split('''(%s)''' % '''|'''.join(lowercase__ ) , lowercase__ ) # last string should be "" return "".join(string_list[:-2] ) def _lowerCAmelCase ( __magic_name__ :Optional[Any] , __magic_name__ :List[str] , __magic_name__ :str , __magic_name__ :Optional[Any] , __magic_name__ :Tuple , __magic_name__ :Any=2_0 , **__magic_name__ :Any ): UpperCAmelCase_ = defaultdict(lowercase__ ) # dict of list of generated tokens for step, batch in tqdm(enumerate(lowercase__ ) ): with torch.no_grad(): UpperCAmelCase_ = batch['''ids'''].shape[-1] UpperCAmelCase_ = accelerator.unwrap_model(lowercase__ ).generate( input_ids=batch['''ids'''][:, : batch['''input_len''']] , num_return_sequences=lowercase__ , **lowercase__ ) # each task is generated batch_size times UpperCAmelCase_ = batch['''task_id'''].repeat(lowercase__ ) UpperCAmelCase_ = accelerator.pad_across_processes( lowercase__ , dim=1 , pad_index=tokenizer.pad_token_id ) UpperCAmelCase_ = accelerator.gather((generated_tokens, generated_tasks) ) UpperCAmelCase_ = generated_tokens.cpu().numpy() UpperCAmelCase_ = generated_tasks.cpu().numpy() for task, generated_tokens in zip(lowercase__ , lowercase__ ): gen_token_dict[task].append(lowercase__ ) UpperCAmelCase_ = [[] for _ in range(lowercase__ )] for task, generated_tokens in gen_token_dict.items(): for s 
in generated_tokens: UpperCAmelCase_ = tokenizer.decode(lowercase__ , skip_special_tokens=lowercase__ , clean_up_tokenization_spaces=lowercase__ ) code_gens[task].append(remove_last_block(lowercase__ ) ) return code_gens def _lowerCAmelCase ( ): # Setup configuration UpperCAmelCase_ = HfArgumentParser(lowercase__ ) UpperCAmelCase_ = parser.parse_args() transformers.logging.set_verbosity_error() # enables code execution in code_eval metric UpperCAmelCase_ = args.HF_ALLOW_CODE_EVAL # make sure tokenizer plays nice with multiprocessing UpperCAmelCase_ = '''false''' if args.num_workers is None: UpperCAmelCase_ = multiprocessing.cpu_count() # Use dataset load to feed to accelerate UpperCAmelCase_ = Accelerator() set_seed(args.seed , device_specific=lowercase__ ) # Load model and tokenizer UpperCAmelCase_ = AutoTokenizer.from_pretrained(args.model_ckpt ) UpperCAmelCase_ = tokenizer.eos_token UpperCAmelCase_ = AutoModelForCausalLM.from_pretrained(args.model_ckpt ) # Generation settings UpperCAmelCase_ = { '''do_sample''': args.do_sample, '''temperature''': args.temperature, '''max_new_tokens''': args.max_new_tokens, '''top_p''': args.top_p, '''top_k''': args.top_k, '''stopping_criteria''': StoppingCriteriaList([EndOfFunctionCriteria(0 , lowercase__ , lowercase__ )] ), } # Load evaluation dataset and metric UpperCAmelCase_ = load_dataset('''openai_humaneval''' ) UpperCAmelCase_ = load_metric('''code_eval''' ) UpperCAmelCase_ = args.num_tasks if args.num_tasks is not None else len(human_eval['''test'''] ) UpperCAmelCase_ = args.n_samples // args.batch_size UpperCAmelCase_ = TokenizedDataset(lowercase__ , human_eval['''test'''] , n_copies=lowercase__ , n_tasks=lowercase__ ) # do not confuse args.batch_size, which is actually the num_return_sequences UpperCAmelCase_ = DataLoader(lowercase__ , batch_size=1 ) # Run a quick test to see if code evaluation is enabled try: UpperCAmelCase_ = code_eval_metric.compute(references=[''''''] , predictions=[['''''']] ) except ValueError as exception: print( '''Code evaluation not enabled. Read the warning below carefully and then use `--HF_ALLOW_CODE_EVAL="1"`''' ''' flag to enable code evaluation.''' ) raise exception UpperCAmelCase_ = accelerator.prepare(lowercase__ , lowercase__ ) UpperCAmelCase_ = complete_code( lowercase__ , lowercase__ , lowercase__ , lowercase__ , n_tasks=lowercase__ , batch_size=args.batch_size , **lowercase__ , ) if accelerator.is_main_process: UpperCAmelCase_ = [] for task in tqdm(range(lowercase__ ) ): UpperCAmelCase_ = human_eval['''test'''][task]['''test'''] UpperCAmelCase_ = F'''check({human_eval['test'][task]['entry_point']})''' references.append('''\n''' + test_func + '''\n''' + entry_point ) # Evaluate completions with "code_eval" metric UpperCAmelCase_ = code_eval_metric.compute( references=lowercase__ , predictions=lowercase__ , num_workers=args.num_workers ) print(F'''Results: {pass_at_k}''' ) # Save results to json file with open(args.output_file , '''w''' ) as fp: json.dump(lowercase__ , lowercase__ ) # For some reason the folliwng seems to be necessary sometimes for code_eval to work nice with multiprocessing # https://stackoverflow.com/questions/60804599/python-multiprocessing-keeps-spawning-the-whole-script if __name__ == "__main__": main()
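A quick illustrative check of the truncation helper defined above (its definition is mangled in this dump, but `complete_code` calls it as `remove_last_block`); the generated string here is hypothetical. The helper splits on the end-of-function markers and drops the last marker plus everything after it.

import re

EOF_STRINGS = ["\nclass", "\ndef", "\n#", "\n@", "\nprint", "\nif"]

def remove_last_block(string: str) -> str:
    # Keep everything before the final stop marker.
    string_list = re.split("(%s)" % "|".join(EOF_STRINGS), string)
    return "".join(string_list[:-2])

generated = "    return x + 1\ndef another_function():\n    pass"
print(remove_last_block(generated))  # -> "    return x + 1"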
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion # and https://github.com/hojonathanho/diffusion import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import torch from diffusers.configuration_utils import ConfigMixin, register_to_config from diffusers.schedulers.scheduling_utils import SchedulerMixin from diffusers.utils import BaseOutput, deprecate @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : torch.FloatTensor SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None def _UpperCamelCase ( lowercase__ , lowercase__=0.999 , lowercase__="cosine" , ): if alpha_transform_type == "cosine": def alpha_bar_fn(lowercase__ ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(lowercase__ ): return math.exp(t * -12.0 ) else: raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) __SCREAMING_SNAKE_CASE : List[Any] = [] for i in range(lowercase__ ): __SCREAMING_SNAKE_CASE : Optional[Any] = i / num_diffusion_timesteps __SCREAMING_SNAKE_CASE : List[Any] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(lowercase__ ) / alpha_bar_fn(lowercase__ ) , lowercase__ ) ) return torch.tensor(lowercase__ , dtype=torch.floataa ) class _lowercase ( A__ , A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = 1 @register_to_config def __init__( self :Dict , lowerCAmelCase__ :int = 1_000 , lowerCAmelCase__ :float = 0.0001 , lowerCAmelCase__ :float = 0.02 , lowerCAmelCase__ :str = "linear" , lowerCAmelCase__ :Optional[Union[np.ndarray, List[float]]] = None , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :int = 0 , lowerCAmelCase__ :str = "epsilon" , lowerCAmelCase__ :float = 1.0 , **lowerCAmelCase__ :int , ) -> Union[str, Any]: if kwargs.get('''set_alpha_to_one''' , lowerCAmelCase__ ) is not None: __SCREAMING_SNAKE_CASE : Optional[int] = ( '''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.''' ) deprecate('''set_alpha_to_one''' , '''1.0.0''' , lowerCAmelCase__ , standard_warn=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = kwargs['''set_alpha_to_one'''] if trained_betas is not None: __SCREAMING_SNAKE_CASE : Any = torch.tensor(lowerCAmelCase__ , dtype=torch.floataa ) elif beta_schedule == "linear": __SCREAMING_SNAKE_CASE : str = torch.linspace(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. 
__SCREAMING_SNAKE_CASE : List[Any] = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCAmelCase__ , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule __SCREAMING_SNAKE_CASE : Optional[Any] = betas_for_alpha_bar(lowerCAmelCase__ ) else: raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' ) __SCREAMING_SNAKE_CASE : Optional[int] = 1.0 - self.betas __SCREAMING_SNAKE_CASE : int = torch.cumprod(self.alphas , dim=0 ) # At every step in inverted ddim, we are looking into the next alphas_cumprod # For the final step, there is no next alphas_cumprod, and the index is out of bounds # `set_alpha_to_zero` decides whether we set this parameter simply to zero # in this case, self.step() just output the predicted noise # or whether we use the final alpha of the "non-previous" one. __SCREAMING_SNAKE_CASE : int = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1] # standard deviation of the initial noise distribution __SCREAMING_SNAKE_CASE : Any = 1.0 # setable values __SCREAMING_SNAKE_CASE : Optional[Any] = None __SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(np.arange(0 , lowerCAmelCase__ ).copy().astype(np.intaa ) ) def __magic_name__( self :List[str] , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :Optional[int] = None ) -> torch.FloatTensor: return sample def __magic_name__( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, torch.device] = None ) -> List[str]: if num_inference_steps > self.config.num_train_timesteps: raise ValueError( f'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:''' f''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle''' f''' maximal {self.config.num_train_timesteps} timesteps.''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = num_inference_steps __SCREAMING_SNAKE_CASE : Union[str, Any] = self.config.num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __SCREAMING_SNAKE_CASE : Optional[int] = (np.arange(0 , lowerCAmelCase__ ) * step_ratio).round().copy().astype(np.intaa ) __SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(lowerCAmelCase__ ).to(lowerCAmelCase__ ) self.timesteps += self.config.steps_offset def __magic_name__( self :Tuple , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :int , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :float = 0.0 , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[torch.FloatTensor] = None , lowerCAmelCase__ :bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]: # 1. get previous step value (=t+1) __SCREAMING_SNAKE_CASE : Optional[Any] = timestep + self.config.num_train_timesteps // self.num_inference_steps # 2. compute alphas, betas # change original implementation to exactly match noise levels for analogous forward process __SCREAMING_SNAKE_CASE : Any = self.alphas_cumprod[timestep] __SCREAMING_SNAKE_CASE : str = ( self.alphas_cumprod[prev_timestep] if prev_timestep < self.config.num_train_timesteps else self.final_alpha_cumprod ) __SCREAMING_SNAKE_CASE : int = 1 - alpha_prod_t # 3. 
compute predicted original sample from predicted noise also called # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf if self.config.prediction_type == "epsilon": __SCREAMING_SNAKE_CASE : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 __SCREAMING_SNAKE_CASE : List[Any] = model_output elif self.config.prediction_type == "sample": __SCREAMING_SNAKE_CASE : List[str] = model_output __SCREAMING_SNAKE_CASE : Optional[Any] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5 elif self.config.prediction_type == "v_prediction": __SCREAMING_SNAKE_CASE : Dict = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output __SCREAMING_SNAKE_CASE : Tuple = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample else: raise ValueError( f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or''' ''' `v_prediction`''' ) # 4. Clip or threshold "predicted x_0" if self.config.clip_sample: __SCREAMING_SNAKE_CASE : Dict = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __SCREAMING_SNAKE_CASE : Dict = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf __SCREAMING_SNAKE_CASE : Any = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction if not return_dict: return (prev_sample, pred_original_sample) return DDIMSchedulerOutput(prev_sample=lowerCAmelCase__ , pred_original_sample=lowerCAmelCase__ ) def __len__( self :Optional[int] ) -> List[Any]: return self.config.num_train_timesteps
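A minimal numeric sketch of the inversion loop this scheduler drives, added for illustration. It assumes the class (mangled to `_lowercase` above) is the one published as `DDIMInverseScheduler`, and uses a zero tensor as a stand-in for a real UNet noise prediction.

import torch
from diffusers import DDIMInverseScheduler

scheduler = DDIMInverseScheduler(num_train_timesteps=1000, beta_schedule="scaled_linear")
scheduler.set_timesteps(50)

sample = torch.randn(1, 4, 64, 64)  # "clean" latents to invert toward noise
for t in scheduler.timesteps:
    noise_pred = torch.zeros_like(sample)  # stand-in for unet(sample, t).sample
    sample = scheduler.step(noise_pred, t, sample).prev_sample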
"""simple docstring""" import unittest from diffusers.models.unet_ad_blocks import * # noqa F403 from diffusers.utils import torch_device from .test_unet_blocks_common import UNetBlockTesterMixin class _UpperCAmelCase ( A__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Dict = DownBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ : Any = '''down''' def A ( self : Tuple ) -> Tuple: lowercase_ : List[str] = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904] super().test_output(lowerCAmelCase__ ) class _UpperCAmelCase ( A__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Tuple = ResnetDownsampleBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ : List[Any] = '''down''' def A ( self : str ) -> Optional[int]: lowercase_ : Union[str, Any] = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948] super().test_output(lowerCAmelCase__ ) class _UpperCAmelCase ( A__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Dict = AttnDownBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''down''' def A ( self : Union[str, Any] ) -> Optional[int]: lowercase_ : Union[str, Any] = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957] super().test_output(lowerCAmelCase__ ) class _UpperCAmelCase ( A__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Dict = CrossAttnDownBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ : List[str] = '''down''' def A ( self : Any ) -> List[str]: lowercase_ : Optional[Any] = super().prepare_init_args_and_inputs_for_common() lowercase_ : Optional[Any] = 32 return init_dict, inputs_dict def A ( self : Dict ) -> List[Any]: lowercase_ : Optional[Any] = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983] super().test_output(lowerCAmelCase__ ) class _UpperCAmelCase ( A__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Union[str, Any] = SimpleCrossAttnDownBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ : List[Any] = '''down''' @property def A ( self : Tuple ) -> Union[str, Any]: return super().get_dummy_input(include_encoder_hidden_states=lowerCAmelCase__ ) def A ( self : str ) -> Optional[Any]: lowercase_ : int = super().prepare_init_args_and_inputs_for_common() lowercase_ : List[Any] = 32 return init_dict, inputs_dict @unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' ) def A ( self : List[str] ) -> Any: lowercase_ : Any = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338] super().test_output(lowerCAmelCase__ ) class _UpperCAmelCase ( A__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : List[Any] = SkipDownBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ : Tuple = '''down''' @property def A ( self : str ) -> Tuple: return super().get_dummy_input(include_skip_sample=lowerCAmelCase__ ) def A ( self : List[Any] ) -> List[str]: lowercase_ : Tuple = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069] super().test_output(lowerCAmelCase__ ) class _UpperCAmelCase ( A__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : List[Any] = AttnSkipDownBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ : str = '''down''' @property def A ( self : str ) -> Optional[int]: return super().get_dummy_input(include_skip_sample=lowerCAmelCase__ ) def A ( self : List[Any] ) -> Dict: lowercase_ : Dict = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642] super().test_output(lowerCAmelCase__ ) class _UpperCAmelCase ( A__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Tuple = DownEncoderBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ : str = '''down''' @property def A ( 
self : Any ) -> List[Any]: return super().get_dummy_input(include_temb=lowerCAmelCase__ ) def A ( self : Optional[Any] ) -> Optional[Any]: lowercase_ : Any = { '''in_channels''': 32, '''out_channels''': 32, } lowercase_ : List[Any] = self.dummy_input return init_dict, inputs_dict def A ( self : Union[str, Any] ) -> List[str]: lowercase_ : Optional[int] = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626] super().test_output(lowerCAmelCase__ ) class _UpperCAmelCase ( A__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : str = AttnDownEncoderBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ : Tuple = '''down''' @property def A ( self : str ) -> Any: return super().get_dummy_input(include_temb=lowerCAmelCase__ ) def A ( self : Optional[Any] ) -> Tuple: lowercase_ : Optional[Any] = { '''in_channels''': 32, '''out_channels''': 32, } lowercase_ : List[Any] = self.dummy_input return init_dict, inputs_dict def A ( self : Optional[int] ) -> Optional[int]: lowercase_ : Tuple = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538] super().test_output(lowerCAmelCase__ ) class _UpperCAmelCase ( A__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : str = UNetMidBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ : Dict = '''mid''' def A ( self : Dict ) -> List[Any]: lowercase_ : Optional[int] = { '''in_channels''': 32, '''temb_channels''': 1_28, } lowercase_ : List[Any] = self.dummy_input return init_dict, inputs_dict def A ( self : List[str] ) -> Tuple: lowercase_ : List[str] = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028] super().test_output(lowerCAmelCase__ ) class _UpperCAmelCase ( A__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : str = UNetMidBlockaDCrossAttn # noqa F405 SCREAMING_SNAKE_CASE_ : List[str] = '''mid''' def A ( self : Optional[Any] ) -> Optional[Any]: lowercase_ : Any = super().prepare_init_args_and_inputs_for_common() lowercase_ : Optional[int] = 32 return init_dict, inputs_dict def A ( self : Optional[int] ) -> int: lowercase_ : int = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335] super().test_output(lowerCAmelCase__ ) class _UpperCAmelCase ( A__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Tuple = UNetMidBlockaDSimpleCrossAttn # noqa F405 SCREAMING_SNAKE_CASE_ : List[str] = '''mid''' @property def A ( self : List[str] ) -> Optional[int]: return super().get_dummy_input(include_encoder_hidden_states=lowerCAmelCase__ ) def A ( self : int ) -> str: lowercase_ : Tuple = super().prepare_init_args_and_inputs_for_common() lowercase_ : List[str] = 32 return init_dict, inputs_dict def A ( self : Dict ) -> Dict: lowercase_ : List[str] = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880] super().test_output(lowerCAmelCase__ ) class _UpperCAmelCase ( A__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Optional[Any] = UpBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ : int = '''up''' @property def A ( self : Union[str, Any] ) -> List[Any]: return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ ) def A ( self : Any ) -> List[Any]: lowercase_ : Tuple = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523] super().test_output(lowerCAmelCase__ ) class _UpperCAmelCase ( A__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Union[str, Any] = ResnetUpsampleBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''up''' @property def A ( self : str ) -> Tuple: return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ ) def A 
( self : Tuple ) -> Union[str, Any]: lowercase_ : List[Any] = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244] super().test_output(lowerCAmelCase__ ) class _UpperCAmelCase ( A__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : str = CrossAttnUpBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ : List[Any] = '''up''' @property def A ( self : Optional[Any] ) -> Tuple: return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ ) def A ( self : List[str] ) -> List[str]: lowercase_ : str = super().prepare_init_args_and_inputs_for_common() lowercase_ : Tuple = 32 return init_dict, inputs_dict def A ( self : Optional[int] ) -> Optional[Any]: lowercase_ : Dict = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582] super().test_output(lowerCAmelCase__ ) class _UpperCAmelCase ( A__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : List[Any] = SimpleCrossAttnUpBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ : List[str] = '''up''' @property def A ( self : Optional[int] ) -> Tuple: return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ , include_encoder_hidden_states=lowerCAmelCase__ ) def A ( self : List[Any] ) -> Tuple: lowercase_ : Dict = super().prepare_init_args_and_inputs_for_common() lowercase_ : Dict = 32 return init_dict, inputs_dict def A ( self : List[Any] ) -> List[Any]: lowercase_ : int = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402] super().test_output(lowerCAmelCase__ ) class _UpperCAmelCase ( A__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Dict = AttnUpBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ : Union[str, Any] = '''up''' @property def A ( self : Any ) -> str: return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ ) @unittest.skipIf(torch_device == '''mps''' , '''MPS result is not consistent''' ) def A ( self : Optional[Any] ) -> Tuple: lowercase_ : List[Any] = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033] super().test_output(lowerCAmelCase__ ) class _UpperCAmelCase ( A__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : List[Any] = SkipUpBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ : Any = '''up''' @property def A ( self : str ) -> Tuple: return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ ) def A ( self : Optional[Any] ) -> List[Any]: lowercase_ : Union[str, Any] = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362] super().test_output(lowerCAmelCase__ ) class _UpperCAmelCase ( A__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Any = AttnSkipUpBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ : str = '''up''' @property def A ( self : Union[str, Any] ) -> Tuple: return super().get_dummy_input(include_res_hidden_states_tuple=lowerCAmelCase__ ) def A ( self : Tuple ) -> Tuple: lowercase_ : Optional[Any] = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015] super().test_output(lowerCAmelCase__ ) class _UpperCAmelCase ( A__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Union[str, Any] = UpDecoderBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ : int = '''up''' @property def A ( self : List[str] ) -> Union[str, Any]: return super().get_dummy_input(include_temb=lowerCAmelCase__ ) def A ( self : Optional[Any] ) -> int: lowercase_ : List[str] = {'''in_channels''': 32, '''out_channels''': 32} lowercase_ : Any = self.dummy_input return init_dict, inputs_dict def A ( self : Any ) -> Any: lowercase_ : Union[str, Any] = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, 
-0.6955, -0.2338, -0.3137] super().test_output(lowerCAmelCase__ ) class _UpperCAmelCase ( A__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Union[str, Any] = AttnUpDecoderBlockaD # noqa F405 SCREAMING_SNAKE_CASE_ : int = '''up''' @property def A ( self : List[str] ) -> str: return super().get_dummy_input(include_temb=lowerCAmelCase__ ) def A ( self : int ) -> Optional[Any]: lowercase_ : Dict = {'''in_channels''': 32, '''out_channels''': 32} lowercase_ : Optional[Any] = self.dummy_input return init_dict, inputs_dict def A ( self : Any ) -> Optional[Any]: lowercase_ : Optional[Any] = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568] super().test_output(lowerCAmelCase__ )
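For reference, a standalone sketch of what these block tests exercise: constructing one block directly and running a forward pass. The tests use the mangled name `DownBlockaD`; the published class is `DownBlock2D`, and the import path below is an assumption based on the module imported at the top of this file.

import torch
from diffusers.models.unet_2d_blocks import DownBlock2D

block = DownBlock2D(in_channels=32, out_channels=32, temb_channels=128)
hidden_states = torch.randn(4, 32, 32, 32)  # (batch, channels, height, width)
temb = torch.randn(4, 128)                  # timestep embedding
out, skip_states = block(hidden_states, temb)
print(out.shape)  # downsampling halves the spatial size: [4, 32, 16, 16]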
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Value
from .base import TaskTemplate


@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` names the template; the metadata flag keeps it in asdict() output
    # even when it equals the default.
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
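A short usage sketch for the template above, added for illustration; the reconstructed name `Summarization` is assumed to match the published datasets task template.

template = Summarization(text_column="article", summary_column="highlights")
print(template.column_mapping)  # {"article": "text", "highlights": "summary"}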
"""simple docstring""" import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class _UpperCAmelCase ( A__ ): a__ : List[str] = ['''image_processor''', '''tokenizer'''] a__ : Any = '''AutoImageProcessor''' a__ : Any = '''AutoTokenizer''' def __init__( self : List[Any] , _lowercase : Tuple=None , _lowercase : Dict=None , **_lowercase : Union[str, Any] ): __UpperCAmelCase = None if "feature_extractor" in kwargs: warnings.warn( '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`''' ''' instead.''' , lowerCAmelCase__ , ) __UpperCAmelCase = kwargs.pop('''feature_extractor''' ) __UpperCAmelCase = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('''You need to specify an `image_processor`.''' ) if tokenizer is None: raise ValueError('''You need to specify a `tokenizer`.''' ) super().__init__(lowerCAmelCase__ , lowerCAmelCase__ ) __UpperCAmelCase = self.image_processor __UpperCAmelCase = False def __call__( self : int , *_lowercase : List[Any] , **_lowercase : Dict ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*lowerCAmelCase__ , **lowerCAmelCase__ ) __UpperCAmelCase = kwargs.pop('''images''' , lowerCAmelCase__ ) __UpperCAmelCase = kwargs.pop('''text''' , lowerCAmelCase__ ) if len(lowerCAmelCase__ ) > 0: __UpperCAmelCase = args[0] __UpperCAmelCase = args[1:] if images is None and text is None: raise ValueError('''You need to specify either an `images` or `text` input to process.''' ) if images is not None: __UpperCAmelCase = self.image_processor(lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__ ) if text is not None: __UpperCAmelCase = self.tokenizer(lowerCAmelCase__ , **lowerCAmelCase__ ) if text is None: return inputs elif images is None: return encodings else: __UpperCAmelCase = encodings['''input_ids'''] return inputs def a ( self : Optional[int] , *_lowercase : Any , **_lowercase : int ): return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ ) def a ( self : int , *_lowercase : Union[str, Any] , **_lowercase : Any ): return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ ) @contextmanager def a ( self : str ): warnings.warn( '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. 
You can process your ''' '''labels by using the argument `text` of the regular `__call__` method (either in the same call as ''' '''your images inputs, or in a separate call.''' ) __UpperCAmelCase = True __UpperCAmelCase = self.tokenizer yield __UpperCAmelCase = self.image_processor __UpperCAmelCase = False def a ( self : Dict , _lowercase : List[Any] , _lowercase : Optional[int]=False , _lowercase : List[str]=None ): if added_vocab is None: __UpperCAmelCase = self.tokenizer.get_added_vocab() __UpperCAmelCase = {} while tokens: __UpperCAmelCase = re.search(r'''<s_(.*?)>''' , lowerCAmelCase__ , re.IGNORECASE ) if start_token is None: break __UpperCAmelCase = start_token.group(1 ) __UpperCAmelCase = re.search(rF'''</s_{key}>''' , lowerCAmelCase__ , re.IGNORECASE ) __UpperCAmelCase = start_token.group() if end_token is None: __UpperCAmelCase = tokens.replace(lowerCAmelCase__ , '''''' ) else: __UpperCAmelCase = end_token.group() __UpperCAmelCase = re.escape(lowerCAmelCase__ ) __UpperCAmelCase = re.escape(lowerCAmelCase__ ) __UpperCAmelCase = re.search(F'''{start_token_escaped}(.*?){end_token_escaped}''' , lowerCAmelCase__ , re.IGNORECASE ) if content is not None: __UpperCAmelCase = content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node __UpperCAmelCase = self.tokenajson(lowerCAmelCase__ , is_inner_value=lowerCAmelCase__ , added_vocab=lowerCAmelCase__ ) if value: if len(lowerCAmelCase__ ) == 1: __UpperCAmelCase = value[0] __UpperCAmelCase = value else: # leaf nodes __UpperCAmelCase = [] for leaf in content.split(r'''<sep/>''' ): __UpperCAmelCase = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": __UpperCAmelCase = leaf[1:-2] # for categorical special tokens output[key].append(lowerCAmelCase__ ) if len(output[key] ) == 1: __UpperCAmelCase = output[key][0] __UpperCAmelCase = tokens[tokens.find(lowerCAmelCase__ ) + len(lowerCAmelCase__ ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:] , is_inner_value=lowerCAmelCase__ , added_vocab=lowerCAmelCase__ ) if len(lowerCAmelCase__ ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def a ( self : Optional[Any] ): warnings.warn( '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , lowerCAmelCase__ , ) return self.image_processor_class @property def a ( self : Union[str, Any] ): warnings.warn( '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , lowerCAmelCase__ , ) return self.image_processor
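An illustrative sketch of the tag-to-JSON parsing above (mangled to `tokenajson` in this dump; the published Donut processor exposes it as `token2json`). The checkpoint name is an assumption used only to obtain a tokenizer with the method attached.

from transformers import DonutProcessor

processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")
sequence = "<s_menu><s_nm>latte</s_nm><s_cnt>2</s_cnt></s_menu>"
print(processor.token2json(sequence))
# expected: {"menu": {"nm": "latte", "cnt": "2"}}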
def solution(max_perimeter: int = 10**9) -> int:
    """Sum every perimeter produced by the recurrence while it stays within
    ``max_perimeter``."""
    prev_value = 1
    value = 2
    perimeters_sum = 0
    i = 0
    perimeter = 0
    while perimeter <= max_perimeter:
        perimeters_sum += perimeter
        prev_value += 2 * value
        value += prev_value
        perimeter = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
        i += 1
    return perimeters_sum


if __name__ == "__main__":
    print(f"{solution() = }")
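A worked check, added for illustration: the first perimeters the recurrence emits are 16, 50 and 196, so the sums below follow by hand.

print(solution(100))  # 16 + 50 = 66
print(solution(200))  # 16 + 50 + 196 = 262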
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Lazy import structure: the heavy torch modules are only imported when an
# attribute is first accessed.
_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
def abbr(a: str, b: str) -> bool:
    """Return True if ``a`` can be abbreviated to ``b`` by capitalising some
    of its lowercase letters and deleting the remaining lowercase letters."""
    n = len(a)
    m = len(b)
    # dp[i][j] is True when the first i characters of ``a`` can produce the
    # first j characters of ``b``.
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
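A worked example for the DP above: "daBcd" abbreviates to "ABC" (capitalise 'a' and 'c', delete both 'd's), while "dBcd" cannot supply the leading 'A'.

print(abbr("daBcd", "ABC"))  # True
print(abbr("dBcd", "ABC"))   # False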
"""simple docstring""" from binascii import hexlify from hashlib import shaaaa from os import urandom # RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for # Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526 __lowerCamelCase = { # 1536-bit 5: { 'prime': int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF", base=16, ), 'generator': 2, }, # 2048-bit 14: { 'prime': int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AACAA68FFFFFFFFFFFFFFFF", base=16, ), 'generator': 2, }, # 3072-bit 15: { 'prime': int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF", base=16, ), 'generator': 2, }, # 4096-bit 16: { 'prime': int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199" + "FFFFFFFFFFFFFFFF", base=16, ), 'generator': 2, }, # 6144-bit 17: { 'prime': int( 
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08" + "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B" + "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9" + "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6" + "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8" + "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C" + "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718" + "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D" + "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D" + "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226" + "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC" + "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26" + "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB" + "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2" + "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127" + "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406" + "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918" + "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151" + "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03" + "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F" + "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B" + "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632" + "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E" + "6DCC4024FFFFFFFFFFFFFFFF", base=16, ), 'generator': 2, }, # 8192-bit 18: { 'prime': int( "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1" + "29024E088A67CC74020BBEA63B139B22514A08798E3404DD" + "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245" + "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED" + "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D" + "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F" + "83655D23DCA3AD961C62F356208552BB9ED529077096966D" + "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B" + "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9" + "DE2BCBF6955817183995497CEA956AE515D2261898FA0510" + "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64" + "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7" + "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B" + "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C" + "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31" + "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7" + "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA" + "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6" + "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED" + "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9" + "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492" + "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD" + "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831" + "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B" + "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF" + "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6" + "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3" + "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA" + "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328" + "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C" + "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE" + "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4" + "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300" + "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568" + 
"3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9" + "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B" + "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A" + "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36" + "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1" + "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92" + "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47" + "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71" + "60C980DD98EDD3DFFFFFFFFFFFFFFFFF", base=16, ), 'generator': 2, }, } class _lowercase : def __init__( self , UpperCamelCase_ = 14 ): if group not in primes: raise ValueError('''Unsupported Group''' ) __magic_name__ = primes[group]['''prime'''] __magic_name__ = primes[group]['''generator'''] __magic_name__ = int(hexlify(urandom(32 ) ) , base=16 ) def lowerCAmelCase__ ( self ): return hex(self.__private_key )[2:] def lowerCAmelCase__ ( self ): __magic_name__ = pow(self.generator , self.__private_key , self.prime ) return hex(lowerCAmelCase__ )[2:] def lowerCAmelCase__ ( self , UpperCamelCase_ ): # check if the other public key is valid based on NIST SP800-56 return ( 2 <= key <= self.prime - 2 and pow(lowerCAmelCase__ , (self.prime - 1) // 2 , self.prime ) == 1 ) def lowerCAmelCase__ ( self , UpperCamelCase_ ): __magic_name__ = int(lowerCAmelCase__ , base=16 ) if not self.is_valid_public_key(lowerCAmelCase__ ): raise ValueError('''Invalid public key''' ) __magic_name__ = pow(lowerCAmelCase__ , self.__private_key , self.prime ) return shaaaa(str(lowerCAmelCase__ ).encode() ).hexdigest() @staticmethod def lowerCAmelCase__ ( UpperCamelCase_ , UpperCamelCase_ ): # check if the other public key is valid based on NIST SP800-56 return ( 2 <= remote_public_key_str <= prime - 2 and pow(lowerCAmelCase__ , (prime - 1) // 2 , lowerCAmelCase__ ) == 1 ) @staticmethod def lowerCAmelCase__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = 14 ): __magic_name__ = int(lowerCAmelCase__ , base=16 ) __magic_name__ = int(lowerCAmelCase__ , base=16 ) __magic_name__ = primes[group]['''prime'''] if not DiffieHellman.is_valid_public_key_static(lowerCAmelCase__ , lowerCAmelCase__ ): raise ValueError('''Invalid public key''' ) __magic_name__ = pow(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) return shaaaa(str(lowerCAmelCase__ ).encode() ).hexdigest() if __name__ == "__main__": import doctest doctest.testmod()
from scipy.stats import pearsonr import datasets __lowerCAmelCase : str ='\nPearson correlation coefficient and p-value for testing non-correlation.\nThe Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.\nThe p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.\n' __lowerCAmelCase : Tuple ='\nArgs:\n predictions (`list` of `int`): Predicted class labels, as returned by a model.\n references (`list` of `int`): Ground truth labels.\n return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.\n\nReturns:\n pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.\n p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.\n\nExamples:\n\n Example 1-A simple example using only predictions and references.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n\n Example 2-The same as Example 1, but that also returns the `p-value`.\n >>> pearsonr_metric = datasets.load_metric("pearsonr")\n >>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)\n >>> print(sorted(list(results.keys())))\n [\'p-value\', \'pearsonr\']\n >>> print(round(results[\'pearsonr\'], 2))\n -0.74\n >>> print(round(results[\'p-value\'], 2))\n 0.15\n' __lowerCAmelCase : Optional[int] ='\n@article{2020SciPy-NMeth,\nauthor = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, Ilhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Antonio H. 
and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\ntitle = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\njournal = {Nature Methods},\nyear = {2020},\nvolume = {17},\npages = {261--272},\nadsurl = {https://rdcu.be/b08Wh},\ndoi = {10.1038/s41592-019-0686-2},\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _lowercase ( datasets.Metric ): '''simple docstring''' def __magic_name__( self :Optional[int] ) -> Optional[int]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''float''' ), '''references''': datasets.Value('''float''' ), } ) , reference_urls=['''https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'''] , ) def __magic_name__( self :Tuple , lowerCAmelCase__ :Any , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Tuple=False ) -> int: if return_pvalue: __SCREAMING_SNAKE_CASE : int = pearsonr(lowerCAmelCase__ , lowerCAmelCase__ ) return {"pearsonr": results[0], "p-value": results[1]} else: return {"pearsonr": float(pearsonr(lowerCAmelCase__ , lowerCAmelCase__ )[0] )}
'''simple docstring''' import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin __a = get_tests_dir('fixtures/test_sentencepiece_bpe.model') class A__ ( A__ , unittest.TestCase ): """simple docstring""" UpperCamelCase_ : List[str] = BartphoTokenizer UpperCamelCase_ : str = False UpperCamelCase_ : Union[str, Any] = True def _lowerCAmelCase ( self : Dict ) -> Dict: """simple docstring""" super().setUp() _UpperCAmelCase : str = ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] _UpperCAmelCase : List[Any] = dict(zip(lowerCAmelCase__ , range(len(lowerCAmelCase__ ) ) ) ) _UpperCAmelCase : Tuple = {'''unk_token''': '''<unk>'''} _UpperCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"] ) with open(self.monolingual_vocab_file , "w" , encoding="utf-8" ) as fp: for token in vocab_tokens: fp.write(F"""{token} {vocab_tokens[token]}\n""" ) _UpperCAmelCase : Tuple = BartphoTokenizer(lowerCAmelCase__ , self.monolingual_vocab_file , **self.special_tokens_map ) tokenizer.save_pretrained(self.tmpdirname ) def _lowerCAmelCase ( self : Optional[Any] , **lowerCAmelCase__ : Optional[Any] ) -> Tuple: """simple docstring""" kwargs.update(self.special_tokens_map ) return BartphoTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) def _lowerCAmelCase ( self : str , lowerCAmelCase__ : List[str] ) -> List[Any]: """simple docstring""" _UpperCAmelCase : Any = '''This is a là test''' _UpperCAmelCase : Optional[Any] = '''This is a<unk><unk> test''' return input_text, output_text def _lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" _UpperCAmelCase : Optional[int] = BartphoTokenizer(lowerCAmelCase__ , self.monolingual_vocab_file , **self.special_tokens_map ) _UpperCAmelCase : Tuple = '''This is a là test''' _UpperCAmelCase : List[Any] = '''▁This ▁is ▁a ▁l à ▁t est'''.split() _UpperCAmelCase : Dict = tokenizer.tokenize(lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) _UpperCAmelCase : str = tokens + [tokenizer.unk_token] _UpperCAmelCase : Optional[int] = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ )
494
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mobilebert import MobileBertTokenizer __lowerCAmelCase : List[str] =logging.get_logger(__name__) __lowerCAmelCase : int ={'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} __lowerCAmelCase : int ={ 'vocab_file': {'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt'}, 'tokenizer_file': { 'mobilebert-uncased': 'https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json' }, } __lowerCAmelCase : Optional[int] ={'mobilebert-uncased': 5_1_2} __lowerCAmelCase : Union[str, Any] ={} class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE__ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE__ : List[str] = PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE__ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE__ : List[Any] = MobileBertTokenizer def __init__( self :Tuple , lowerCAmelCase__ :Optional[int]=None , lowerCAmelCase__ :Any=None , lowerCAmelCase__ :List[Any]=True , lowerCAmelCase__ :List[Any]="[UNK]" , lowerCAmelCase__ :List[Any]="[SEP]" , lowerCAmelCase__ :List[Any]="[PAD]" , lowerCAmelCase__ :List[Any]="[CLS]" , lowerCAmelCase__ :Any="[MASK]" , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Tuple=None , **lowerCAmelCase__ :List[str] , ) -> Optional[Any]: super().__init__( lowerCAmelCase__ , tokenizer_file=lowerCAmelCase__ , do_lower_case=lowerCAmelCase__ , unk_token=lowerCAmelCase__ , sep_token=lowerCAmelCase__ , pad_token=lowerCAmelCase__ , cls_token=lowerCAmelCase__ , mask_token=lowerCAmelCase__ , tokenize_chinese_chars=lowerCAmelCase__ , strip_accents=lowerCAmelCase__ , **lowerCAmelCase__ , ) __SCREAMING_SNAKE_CASE : List[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('''lowercase''' , lowerCAmelCase__ ) != do_lower_case or normalizer_state.get('''strip_accents''' , lowerCAmelCase__ ) != strip_accents or normalizer_state.get('''handle_chinese_chars''' , lowerCAmelCase__ ) != tokenize_chinese_chars ): __SCREAMING_SNAKE_CASE : int = getattr(lowerCAmelCase__ , normalizer_state.pop('''type''' ) ) __SCREAMING_SNAKE_CASE : Optional[int] = do_lower_case __SCREAMING_SNAKE_CASE : str = strip_accents __SCREAMING_SNAKE_CASE : Dict = tokenize_chinese_chars __SCREAMING_SNAKE_CASE : Union[str, Any] = normalizer_class(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Any = do_lower_case def __magic_name__( self :Optional[int] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[Any]=None ) -> Tuple: __SCREAMING_SNAKE_CASE : Optional[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __magic_name__( self :List[str] , lowerCAmelCase__ :List[int] , lowerCAmelCase__ :Optional[List[int]] = None ) -> List[int]: __SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id] __SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __magic_name__( self :Optional[Any] , lowerCAmelCase__ :str , lowerCAmelCase__ :Optional[str] = None ) -> Tuple[str]: __SCREAMING_SNAKE_CASE : int = self._tokenizer.model.save(lowerCAmelCase__ , 
name=lowerCAmelCase__ ) return tuple(lowerCAmelCase__ )
696
0
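The tokenizer methods in the row above implement the standard BERT-style special-token layout: [CLS] A [SEP] for a single sequence, [CLS] A [SEP] B [SEP] for a pair, with token_type_ids 0 for the first segment and 1 for the second. A self-contained sketch of that layout (cls_id/sep_id values here are illustrative placeholders, not the real vocabulary ids):

def build_inputs_with_special_tokens(ids_a, ids_b=None, cls_id=101, sep_id=102):
    # [CLS] A [SEP] (+ B [SEP] for sequence pairs)
    output = [cls_id] + ids_a + [sep_id]
    if ids_b is not None:
        output = output + ids_b + [sep_id]
    return output

def create_token_type_ids(ids_a, ids_b=None):
    # segment 0 covers [CLS] A [SEP]; segment 1 covers B [SEP]
    segment_a = [0] * (len(ids_a) + 2)
    if ids_b is None:
        return segment_a
    return segment_a + [1] * (len(ids_b) + 1)

print(build_inputs_with_special_tokens([7, 8], [9]))  # [101, 7, 8, 102, 9, 102]
print(create_token_type_ids([7, 8], [9]))             # [0, 0, 0, 0, 1, 1]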
"""simple docstring""" import numpy as np from transformers import Pipeline def _snake_case ( UpperCamelCase : Union[str, Any] ): UpperCAmelCase : List[Any] = np.max(lowercase__ , axis=-1 , keepdims=lowercase__ ) UpperCAmelCase : List[str] = np.exp(outputs - maxes ) return shifted_exp / shifted_exp.sum(axis=-1 , keepdims=lowercase__ ) class SCREAMING_SNAKE_CASE__ ( A__ ): def SCREAMING_SNAKE_CASE ( self , **_SCREAMING_SNAKE_CASE ) -> str: '''simple docstring''' UpperCAmelCase : str = {} if "second_text" in kwargs: UpperCAmelCase : List[str] = kwargs['''second_text'''] return preprocess_kwargs, {}, {} def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> str: '''simple docstring''' return self.tokenizer(lowerCAmelCase__ , text_pair=lowerCAmelCase__ , return_tensors=self.framework ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]: '''simple docstring''' return self.model(**lowerCAmelCase__ ) def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Tuple: '''simple docstring''' UpperCAmelCase : int = model_outputs.logits[0].numpy() UpperCAmelCase : Tuple = softmax(lowerCAmelCase__ ) UpperCAmelCase : Optional[int] = np.argmax(lowerCAmelCase__ ) UpperCAmelCase : List[Any] = self.model.config.idalabel[best_class] UpperCAmelCase : List[Any] = probabilities[best_class].item() UpperCAmelCase : str = logits.tolist() return {"label": label, "score": score, "logits": logits}
160
import os


def largest_product(grid):
    n_columns = len(grid[0])
    n_rows = len(grid)
    largest = 0
    lr_diag_product = 0
    rl_diag_product = 0

    # Check vertically, horizontally, diagonally at the same time (only works
    # for nxn grid)
    for i in range(n_columns):
        for j in range(n_rows - 3):
            vert_product = grid[j][i] * grid[j + 1][i] * grid[j + 2][i] * grid[j + 3][i]
            horz_product = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]

            # Left-to-right diagonal (\) product
            if i < n_columns - 3:
                lr_diag_product = (
                    grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
                )

            # Right-to-left diagonal(/) product
            if i > 2:
                rl_diag_product = (
                    grid[i][j] * grid[i - 1][j + 1] * grid[i - 2][j + 2] * grid[i - 3][j + 3]
                )

            max_product = max(vert_product, horz_product, lr_diag_product, rl_diag_product)
            if max_product > largest:
                largest = max_product

    return largest


def solution():
    grid = []
    with open(os.path.dirname(__file__) + "/grid.txt") as file:
        for line in file:
            grid.append(line.strip("\n").split(" "))

    grid = [[int(i) for i in grid[j]] for j in range(len(grid))]
    return largest_product(grid)


if __name__ == "__main__":
    print(solution())
696
0
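The softmax helper in the pipeline row above subtracts the row-wise maximum before exponentiating. A standalone sketch of why that matters numerically:

import numpy as np

def stable_softmax(outputs):
    # softmax is shift-invariant, so subtracting the row max before
    # exponentiating prevents float overflow without changing the result
    maxes = np.max(outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)

# np.exp(1002.0) alone would overflow to inf; the shifted version is exact
print(stable_softmax(np.array([[1000.0, 1001.0, 1002.0]])))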
import copy from typing import Dict, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING from ..detr import DetrConfig from ..swin import SwinConfig SCREAMING_SNAKE_CASE__ : Optional[int] = { 'facebook/maskformer-swin-base-ade': ( 'https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json' ) # See all MaskFormer models at https://huggingface.co/models?filter=maskformer } SCREAMING_SNAKE_CASE__ : int = logging.get_logger(__name__) class UpperCAmelCase_ ( A__ ): __lowerCamelCase = '''maskformer''' __lowerCamelCase = {'''hidden_size''': '''mask_feature_size'''} __lowerCamelCase = ['''resnet''', '''swin'''] __lowerCamelCase = ['''detr'''] def __init__( self , _lowerCAmelCase = 256 , _lowerCAmelCase = 256 , _lowerCAmelCase = 0.1 , _lowerCAmelCase = False , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = 0.0_2 , _lowerCAmelCase = 1.0 , _lowerCAmelCase = 1.0 , _lowerCAmelCase = 1.0 , _lowerCAmelCase = 2_0.0 , _lowerCAmelCase = None , **_lowerCAmelCase , ): if backbone_config is None: # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k UpperCAmelCase__ : Any = SwinConfig( image_size=384 , in_channels=3 , patch_size=4 , embed_dim=128 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] , ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): UpperCAmelCase__ : List[str] = backbone_config.pop("""model_type""" ) UpperCAmelCase__ : Dict = CONFIG_MAPPING[backbone_model_type] UpperCAmelCase__ : List[str] = config_class.from_dict(lowerCAmelCase__ ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f"Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. 
" f"Supported model types: {','.join(self.backbones_supported )}" ) if decoder_config is None: # fall back to https://huggingface.co/facebook/detr-resnet-50 UpperCAmelCase__ : List[str] = DetrConfig() else: # verify that the decoder is supported UpperCAmelCase__ : Dict = ( decoder_config.pop("""model_type""" ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) else decoder_config.model_type ) if decoder_type not in self.decoders_supported: raise ValueError( f"Transformer Decoder {decoder_type} not supported, please use one of" f" {','.join(self.decoders_supported )}" ) if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): UpperCAmelCase__ : List[str] = CONFIG_MAPPING[decoder_type] UpperCAmelCase__ : int = config_class.from_dict(lowerCAmelCase__ ) UpperCAmelCase__ : Optional[Any] = backbone_config UpperCAmelCase__ : Union[str, Any] = decoder_config # main feature dimension for the model UpperCAmelCase__ : Optional[Any] = fpn_feature_size UpperCAmelCase__ : int = mask_feature_size # initializer UpperCAmelCase__ : List[str] = init_std UpperCAmelCase__ : int = init_xavier_std # Hungarian matcher && loss UpperCAmelCase__ : Union[str, Any] = cross_entropy_weight UpperCAmelCase__ : List[Any] = dice_weight UpperCAmelCase__ : Optional[Any] = mask_weight UpperCAmelCase__ : str = use_auxiliary_loss UpperCAmelCase__ : Dict = no_object_weight UpperCAmelCase__ : Tuple = output_auxiliary_logits UpperCAmelCase__ : Any = self.decoder_config.encoder_attention_heads UpperCAmelCase__ : List[str] = self.decoder_config.num_hidden_layers super().__init__(**lowerCAmelCase__ ) @classmethod def __UpperCAmelCase ( cls , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ): return cls( backbone_config=lowerCAmelCase__ , decoder_config=lowerCAmelCase__ , **lowerCAmelCase__ , ) def __UpperCAmelCase ( self ): UpperCAmelCase__ : str = copy.deepcopy(self.__dict__ ) UpperCAmelCase__ : List[str] = self.backbone_config.to_dict() UpperCAmelCase__ : Optional[Any] = self.decoder_config.to_dict() UpperCAmelCase__ : Any = self.__class__.model_type return output
79
import inspect import unittest from transformers import MobileNetVaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileNetVaForImageClassification, MobileNetVaModel from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import MobileNetVaImageProcessor class _lowercase ( A__ ): '''simple docstring''' def __magic_name__( self :List[Any] ) -> Any: __SCREAMING_SNAKE_CASE : List[Any] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''tf_padding''' ) ) self.parent.assertTrue(hasattr(lowerCAmelCase__ , '''depth_multiplier''' ) ) class _lowercase : '''simple docstring''' def __init__( self :List[str] , lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :List[Any]=13 , lowerCAmelCase__ :Optional[Any]=3 , lowerCAmelCase__ :Optional[Any]=32 , lowerCAmelCase__ :Dict=0.25 , lowerCAmelCase__ :Optional[int]=8 , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Union[str, Any]=1_024 , lowerCAmelCase__ :Any=32 , lowerCAmelCase__ :Tuple="relu6" , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :Dict=0.02 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :int=True , lowerCAmelCase__ :int=10 , lowerCAmelCase__ :Union[str, Any]=None , ) -> str: __SCREAMING_SNAKE_CASE : Any = parent __SCREAMING_SNAKE_CASE : Dict = batch_size __SCREAMING_SNAKE_CASE : List[Any] = num_channels __SCREAMING_SNAKE_CASE : Union[str, Any] = image_size __SCREAMING_SNAKE_CASE : Optional[int] = depth_multiplier __SCREAMING_SNAKE_CASE : Dict = min_depth __SCREAMING_SNAKE_CASE : List[str] = tf_padding __SCREAMING_SNAKE_CASE : List[Any] = int(last_hidden_size * depth_multiplier ) __SCREAMING_SNAKE_CASE : List[str] = output_stride __SCREAMING_SNAKE_CASE : Any = hidden_act __SCREAMING_SNAKE_CASE : Union[str, Any] = classifier_dropout_prob __SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels __SCREAMING_SNAKE_CASE : Union[str, Any] = is_training __SCREAMING_SNAKE_CASE : Optional[int] = num_labels __SCREAMING_SNAKE_CASE : Union[str, Any] = initializer_range __SCREAMING_SNAKE_CASE : Optional[int] = scope def __magic_name__( self :List[str] ) -> int: __SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = None __SCREAMING_SNAKE_CASE : Optional[Any] = None if self.use_labels: __SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.num_labels ) __SCREAMING_SNAKE_CASE : Tuple = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) __SCREAMING_SNAKE_CASE : Any = self.get_config() return config, pixel_values, labels, pixel_labels def __magic_name__( self :Union[str, Any] ) -> Optional[Any]: return MobileNetVaConfig( num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , ) def __magic_name__( self :List[Any] , 
lowerCAmelCase__ :int , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Any , lowerCAmelCase__ :List[str] ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Dict = MobileNetVaModel(config=lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE : List[str] = model(lowerCAmelCase__ ) self.parent.assertEqual( result.last_hidden_state.shape , ( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) , ) def __magic_name__( self :List[Any] , lowerCAmelCase__ :List[Any] , lowerCAmelCase__ :Optional[int] , lowerCAmelCase__ :Dict , lowerCAmelCase__ :Union[str, Any] ) -> Optional[Any]: __SCREAMING_SNAKE_CASE : Tuple = self.num_labels __SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaForImageClassification(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() __SCREAMING_SNAKE_CASE : List[Any] = model(lowerCAmelCase__ , labels=lowerCAmelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __magic_name__( self :List[Any] ) -> Tuple: __SCREAMING_SNAKE_CASE : List[Any] = self.prepare_config_and_inputs() __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : str = config_and_inputs __SCREAMING_SNAKE_CASE : Dict = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _lowercase ( A__ , A__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else () SCREAMING_SNAKE_CASE__ : Optional[Any] = ( {'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE__ : Any = False SCREAMING_SNAKE_CASE__ : Any = False SCREAMING_SNAKE_CASE__ : str = False SCREAMING_SNAKE_CASE__ : Tuple = False def __magic_name__( self :Any ) -> Dict: __SCREAMING_SNAKE_CASE : List[str] = MobileNetVaModelTester(self ) __SCREAMING_SNAKE_CASE : Optional[int] = MobileNetVaConfigTester(self , config_class=lowerCAmelCase__ , has_text_modality=lowerCAmelCase__ ) def __magic_name__( self :List[Any] ) -> int: self.config_tester.run_common_tests() @unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' ) def __magic_name__( self :Dict ) -> Optional[Any]: pass @unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' ) def __magic_name__( self :List[Any] ) -> List[Any]: pass @unittest.skip(reason='''MobileNetV1 does not output attentions''' ) def __magic_name__( self :Any ) -> Dict: pass def __magic_name__( self :Any ) -> List[Any]: __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE : Any = model_class(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __SCREAMING_SNAKE_CASE : Union[str, Any] = [*signature.parameters.keys()] __SCREAMING_SNAKE_CASE : List[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowerCAmelCase__ ) def __magic_name__( self :Any ) -> Tuple: __SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__ ) def __magic_name__( self :Union[str, Any] ) -> Tuple: def check_hidden_states_output(lowerCAmelCase__ :Union[str, Any] , 
lowerCAmelCase__ :Union[str, Any] , lowerCAmelCase__ :Union[str, Any] ): __SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(lowerCAmelCase__ ) model.to(lowerCAmelCase__ ) model.eval() with torch.no_grad(): __SCREAMING_SNAKE_CASE : Optional[Any] = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) ) __SCREAMING_SNAKE_CASE : Optional[Any] = outputs.hidden_states __SCREAMING_SNAKE_CASE : Optional[int] = 26 self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __SCREAMING_SNAKE_CASE : str = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __SCREAMING_SNAKE_CASE : List[Any] = True check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) def __magic_name__( self :List[Any] ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase__ ) @slow def __magic_name__( self :List[str] ) -> List[Any]: for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __SCREAMING_SNAKE_CASE : Optional[Any] = MobileNetVaModel.from_pretrained(lowerCAmelCase__ ) self.assertIsNotNone(lowerCAmelCase__ ) def _UpperCamelCase ( ): __SCREAMING_SNAKE_CASE : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class _lowercase ( unittest.TestCase ): '''simple docstring''' @cached_property def __magic_name__( self :Optional[int] ) -> Union[str, Any]: return ( MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None ) @slow def __magic_name__( self :Tuple ) -> Optional[int]: __SCREAMING_SNAKE_CASE : List[str] = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = self.default_image_processor __SCREAMING_SNAKE_CASE : int = prepare_img() __SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=lowerCAmelCase__ , return_tensors='''pt''' ).to(lowerCAmelCase__ ) # forward pass with torch.no_grad(): __SCREAMING_SNAKE_CASE : int = model(**lowerCAmelCase__ ) # verify the logits __SCREAMING_SNAKE_CASE : Any = torch.Size((1, 1_001) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(lowerCAmelCase__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase__ , atol=1E-4 ) )
696
0
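The MaskFormer config above accepts either a ready config object or a plain dict carrying a "model_type" key, and maps that type string to a config class. A hedged, stripped-down sketch of the dispatch pattern (SwinConfig and CONFIG_MAPPING here are stand-ins, not the real transformers registry):

CONFIG_MAPPING = {}  # model_type -> config class (stand-in for the real registry)

class SwinConfig:
    model_type = "swin"

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    @classmethod
    def from_dict(cls, config_dict):
        return cls(**config_dict)

CONFIG_MAPPING["swin"] = SwinConfig

def resolve_backbone_config(backbone_config):
    if backbone_config is None:
        return SwinConfig()  # library-style fallback default
    if isinstance(backbone_config, dict):
        model_type = backbone_config.pop("model_type")
        return CONFIG_MAPPING[model_type].from_dict(backbone_config)
    return backbone_config  # already a config object

cfg = resolve_backbone_config({"model_type": "swin", "window_size": 12})
print(type(cfg).__name__, cfg.window_size)  # SwinConfig 12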
def is_even(number: int) -> bool:
    return number & 1 == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
504
import os from datetime import datetime as dt from github import Github __lowerCAmelCase : List[Any] =[ 'good first issue', 'good second issue', 'good difficult issue', 'enhancement', 'new pipeline/model', 'new scheduler', 'wip', ] def _UpperCamelCase ( ): __SCREAMING_SNAKE_CASE : Tuple = Github(os.environ['''GITHUB_TOKEN'''] ) __SCREAMING_SNAKE_CASE : Union[str, Any] = g.get_repo('''huggingface/diffusers''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = repo.get_issues(state='''open''' ) for issue in open_issues: __SCREAMING_SNAKE_CASE : Optional[int] = sorted(issue.get_comments() , key=lambda lowercase__ : i.created_at , reverse=lowercase__ ) __SCREAMING_SNAKE_CASE : List[Any] = comments[0] if len(lowercase__ ) > 0 else None if ( last_comment is not None and last_comment.user.login == "github-actions[bot]" and (dt.utcnow() - issue.updated_at).days > 7 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Closes the issue after 7 days of inactivity since the Stalebot notification. issue.edit(state='''closed''' ) elif ( "stale" in issue.get_labels() and last_comment is not None and last_comment.user.login != "github-actions[bot]" ): # Opens the issue if someone other than Stalebot commented. issue.edit(state='''open''' ) issue.remove_from_labels('''stale''' ) elif ( (dt.utcnow() - issue.updated_at).days > 23 and (dt.utcnow() - issue.created_at).days >= 30 and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() ) ): # Post a Stalebot notification after 23 days of inactivity. issue.create_comment( '''This issue has been automatically marked as stale because it has not had ''' '''recent activity. If you think this still needs to be addressed ''' '''please comment on this thread.\n\nPlease note that issues that do not follow the ''' '''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) ''' '''are likely to be ignored.''' ) issue.add_to_labels('''stale''' ) if __name__ == "__main__": main()
696
0
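The parity helper above relies on the low bit of two's-complement integers, which is 0 exactly for even values (negatives included in Python). A quick sanity check; note that Python's & binds tighter than ==, so n & 1 == 0 parses as (n & 1) == 0:

assert is_even(0) and is_even(-2) and not is_even(7)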
'''simple docstring''' from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase : List[Any] = logging.get_logger(__name__) lowerCamelCase : Tuple = { 'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json', } class A__ ( A__ ): A__ = '''transfo-xl''' A__ = ['''mems'''] A__ = { '''n_token''': '''vocab_size''', '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self : str , _a : Optional[int]=26_7735 , _a : Optional[int]=[2_0000, 4_0000, 20_0000] , _a : List[Any]=1024 , _a : List[str]=1024 , _a : Any=16 , _a : Tuple=64 , _a : Union[str, Any]=4096 , _a : Dict=4 , _a : Optional[Any]=False , _a : Dict=18 , _a : Union[str, Any]=1600 , _a : Union[str, Any]=1000 , _a : Optional[Any]=True , _a : Union[str, Any]=True , _a : Optional[Any]=0 , _a : Union[str, Any]=-1 , _a : List[str]=True , _a : List[str]=0.1 , _a : str=0.0 , _a : int=True , _a : str="normal" , _a : Tuple=0.01 , _a : Union[str, Any]=0.01 , _a : str=0.02 , _a : Optional[Any]=1e-5 , _a : Union[str, Any]=0 , **_a : Optional[Any] , ) -> str: '''simple docstring''' _SCREAMING_SNAKE_CASE =vocab_size _SCREAMING_SNAKE_CASE =[] self.cutoffs.extend(lowerCAmelCase__ ) if proj_share_all_but_first: _SCREAMING_SNAKE_CASE =[False] + [True] * len(self.cutoffs ) else: _SCREAMING_SNAKE_CASE =[False] + [False] * len(self.cutoffs ) _SCREAMING_SNAKE_CASE =d_model _SCREAMING_SNAKE_CASE =d_embed _SCREAMING_SNAKE_CASE =d_head _SCREAMING_SNAKE_CASE =d_inner _SCREAMING_SNAKE_CASE =div_val _SCREAMING_SNAKE_CASE =pre_lnorm _SCREAMING_SNAKE_CASE =n_layer _SCREAMING_SNAKE_CASE =n_head _SCREAMING_SNAKE_CASE =mem_len _SCREAMING_SNAKE_CASE =same_length _SCREAMING_SNAKE_CASE =attn_type _SCREAMING_SNAKE_CASE =clamp_len _SCREAMING_SNAKE_CASE =sample_softmax _SCREAMING_SNAKE_CASE =adaptive _SCREAMING_SNAKE_CASE =dropout _SCREAMING_SNAKE_CASE =dropatt _SCREAMING_SNAKE_CASE =untie_r _SCREAMING_SNAKE_CASE =init _SCREAMING_SNAKE_CASE =init_range _SCREAMING_SNAKE_CASE =proj_init_std _SCREAMING_SNAKE_CASE =init_std _SCREAMING_SNAKE_CASE =layer_norm_epsilon super().__init__(eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) @property def A ( self : str ) -> int: '''simple docstring''' logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit." ) return -1 @max_position_embeddings.setter def A ( self : Tuple , _a : int ) -> Dict: '''simple docstring''' raise NotImplementedError( f"The model {self.model_type} is one of the few models that has no sequence length limit." )
405
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Dict =logging.get_logger(__name__) __lowerCAmelCase : Dict ={ 'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json', # See all CANINE models at https://huggingface.co/models?filter=canine } class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = '''canine''' def __init__( self :Any , lowerCAmelCase__ :List[Any]=768 , lowerCAmelCase__ :Any=12 , lowerCAmelCase__ :str=12 , lowerCAmelCase__ :Optional[int]=3_072 , lowerCAmelCase__ :str="gelu" , lowerCAmelCase__ :Union[str, Any]=0.1 , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :int=16_384 , lowerCAmelCase__ :Tuple=16 , lowerCAmelCase__ :List[Any]=0.02 , lowerCAmelCase__ :int=1E-1_2 , lowerCAmelCase__ :int=0 , lowerCAmelCase__ :List[Any]=0xe000 , lowerCAmelCase__ :List[str]=0xe001 , lowerCAmelCase__ :str=4 , lowerCAmelCase__ :Any=4 , lowerCAmelCase__ :Union[str, Any]=8 , lowerCAmelCase__ :Optional[int]=16_384 , lowerCAmelCase__ :Any=128 , **lowerCAmelCase__ :Optional[Any] , ) -> Optional[Any]: super().__init__(pad_token_id=lowerCAmelCase__ , bos_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings __SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size __SCREAMING_SNAKE_CASE : str = num_hidden_layers __SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads __SCREAMING_SNAKE_CASE : Optional[Any] = intermediate_size __SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_act __SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob __SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : Dict = initializer_range __SCREAMING_SNAKE_CASE : int = type_vocab_size __SCREAMING_SNAKE_CASE : List[Any] = layer_norm_eps # Character config: __SCREAMING_SNAKE_CASE : Tuple = downsampling_rate __SCREAMING_SNAKE_CASE : Optional[Any] = upsampling_kernel_size __SCREAMING_SNAKE_CASE : Any = num_hash_functions __SCREAMING_SNAKE_CASE : Optional[int] = num_hash_buckets __SCREAMING_SNAKE_CASE : List[str] = local_transformer_stride
696
0
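The cutoffs configured in the Transformer-XL row above partition the vocabulary for adaptive softmax: ids below cutoffs[0] fall in the frequent "head" cluster, later ids in progressively rarer tail clusters. An illustrative sketch of that mapping (an assumption about the semantics, not transformers code):

import bisect

def cluster_for_token(token_id, cutoffs):
    # cluster 0 is the head; each cutoff opens the next tail cluster
    return bisect.bisect_right(cutoffs, token_id)

cutoffs = [20000, 40000, 200000]
print(cluster_for_token(5, cutoffs))       # 0 (head cluster)
print(cluster_for_token(25000, cutoffs))   # 1 (first tail cluster)
print(cluster_for_token(150000, cutoffs))  # 2 (second tail cluster)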
import importlib import torch import yaml from omegaconf import OmegaConf from taming.models.vqgan import VQModel def a(lowercase__ , lowercase__=False ): '''simple docstring''' snake_case_ = OmegaConf.load(lowercase__ ) if display: print(yaml.dump(OmegaConf.to_container(lowercase__ ) ) ) return config def a(lowercase__ , lowercase__=None , lowercase__=None ): '''simple docstring''' if conf_path is None: snake_case_ = '''./model_checkpoints/vqgan_only.yaml''' snake_case_ = load_config(lowercase__ , display=lowercase__ ) snake_case_ = VQModel(**config.model.params ) if ckpt_path is None: snake_case_ = '''./model_checkpoints/vqgan_only.pt''' snake_case_ = torch.load(lowercase__ , map_location=lowercase__ ) if ".ckpt" in ckpt_path: snake_case_ = sd['''state_dict'''] model.load_state_dict(lowercase__ , strict=lowercase__ ) model.to(lowercase__ ) del sd return model def a(lowercase__ , lowercase__ ): '''simple docstring''' snake_case_ = model.encode(lowercase__ ) print(f"""VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}""" ) snake_case_ = model.decode(lowercase__ ) return xrec def a(lowercase__ , lowercase__=False ): '''simple docstring''' snake_case_ = string.rsplit('.' , 1 ) if reload: snake_case_ = importlib.import_module(lowercase__ ) importlib.reload(lowercase__ ) return getattr(importlib.import_module(lowercase__ , package=lowercase__ ) , cls ) def a(lowercase__ ): '''simple docstring''' if "target" not in config: raise KeyError('Expected key `target` to instantiate.' ) return get_obj_from_str(config['target'] )(**config.get('params' , {} ) ) def a(lowercase__ , lowercase__ , lowercase__=True , lowercase__=True ): '''simple docstring''' snake_case_ = instantiate_from_config(lowercase__ ) if sd is not None: model.load_state_dict(lowercase__ ) if gpu: model.cuda() if eval_mode: model.eval() return {"model": model} def a(lowercase__ , lowercase__ , lowercase__ , lowercase__ ): '''simple docstring''' # load the specified checkpoint if ckpt: snake_case_ = torch.load(lowercase__ , map_location='cpu' ) snake_case_ = pl_sd['''global_step'''] print(f"""loaded model from global step {global_step}.""" ) else: snake_case_ = {'''state_dict''': None} snake_case_ = None snake_case_ = load_model_from_config(config.model , pl_sd['state_dict'] , gpu=lowercase__ , eval_mode=lowercase__ )['''model'''] return model, global_step
187
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : List[Any] =logging.get_logger(__name__) __lowerCAmelCase : Tuple ={ 'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json', } class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = '''transfo-xl''' SCREAMING_SNAKE_CASE__ : List[str] = ['''mems'''] SCREAMING_SNAKE_CASE__ : List[Any] = { '''n_token''': '''vocab_size''', '''hidden_size''': '''d_model''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self :str , lowerCAmelCase__ :Optional[int]=267_735 , lowerCAmelCase__ :Optional[int]=[20_000, 40_000, 200_000] , lowerCAmelCase__ :List[Any]=1_024 , lowerCAmelCase__ :List[str]=1_024 , lowerCAmelCase__ :Any=16 , lowerCAmelCase__ :Tuple=64 , lowerCAmelCase__ :Union[str, Any]=4_096 , lowerCAmelCase__ :Dict=4 , lowerCAmelCase__ :Optional[Any]=False , lowerCAmelCase__ :Dict=18 , lowerCAmelCase__ :Union[str, Any]=1_600 , lowerCAmelCase__ :Union[str, Any]=1_000 , lowerCAmelCase__ :Optional[Any]=True , lowerCAmelCase__ :Union[str, Any]=True , lowerCAmelCase__ :Optional[Any]=0 , lowerCAmelCase__ :Union[str, Any]=-1 , lowerCAmelCase__ :List[str]=True , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :str=0.0 , lowerCAmelCase__ :int=True , lowerCAmelCase__ :str="normal" , lowerCAmelCase__ :Tuple=0.01 , lowerCAmelCase__ :Union[str, Any]=0.01 , lowerCAmelCase__ :str=0.02 , lowerCAmelCase__ :Optional[Any]=1E-5 , lowerCAmelCase__ :Union[str, Any]=0 , **lowerCAmelCase__ :Optional[Any] , ) -> str: __SCREAMING_SNAKE_CASE : str = vocab_size __SCREAMING_SNAKE_CASE : Tuple = [] self.cutoffs.extend(lowerCAmelCase__ ) if proj_share_all_but_first: __SCREAMING_SNAKE_CASE : List[str] = [False] + [True] * len(self.cutoffs ) else: __SCREAMING_SNAKE_CASE : Tuple = [False] + [False] * len(self.cutoffs ) __SCREAMING_SNAKE_CASE : Union[str, Any] = d_model __SCREAMING_SNAKE_CASE : Union[str, Any] = d_embed __SCREAMING_SNAKE_CASE : Tuple = d_head __SCREAMING_SNAKE_CASE : Dict = d_inner __SCREAMING_SNAKE_CASE : Optional[Any] = div_val __SCREAMING_SNAKE_CASE : Optional[Any] = pre_lnorm __SCREAMING_SNAKE_CASE : List[str] = n_layer __SCREAMING_SNAKE_CASE : int = n_head __SCREAMING_SNAKE_CASE : str = mem_len __SCREAMING_SNAKE_CASE : Union[str, Any] = same_length __SCREAMING_SNAKE_CASE : str = attn_type __SCREAMING_SNAKE_CASE : Dict = clamp_len __SCREAMING_SNAKE_CASE : Tuple = sample_softmax __SCREAMING_SNAKE_CASE : Optional[int] = adaptive __SCREAMING_SNAKE_CASE : int = dropout __SCREAMING_SNAKE_CASE : Optional[Any] = dropatt __SCREAMING_SNAKE_CASE : int = untie_r __SCREAMING_SNAKE_CASE : Optional[int] = init __SCREAMING_SNAKE_CASE : List[str] = init_range __SCREAMING_SNAKE_CASE : Any = proj_init_std __SCREAMING_SNAKE_CASE : List[str] = init_std __SCREAMING_SNAKE_CASE : Tuple = layer_norm_epsilon super().__init__(eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) @property def __magic_name__( self :str ) -> int: # Message copied from Transformer-XL documentation logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' ) return -1 @max_position_embeddings.setter def __magic_name__( self :Tuple , lowerCAmelCase__ :int ) -> Dict: # Message copied from Transformer-XL documentation raise NotImplementedError( f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
696
0
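get_obj_from_str in the VQGAN row above imports a class from a dotted path, although the mangled version never unpacks the rsplit result into module and class names. A corrected, self-contained sketch of the trick:

import importlib

def get_obj_from_str(path):
    # split "pkg.mod.Attr" at the last dot, import the module, getattr the rest
    module_name, attr_name = path.rsplit(".", 1)
    return getattr(importlib.import_module(module_name), attr_name)

ordered_dict_cls = get_obj_from_str("collections.OrderedDict")
print(ordered_dict_cls([("a", 1)]))  # OrderedDict([('a', 1)])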
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
121
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : str =logging.get_logger(__name__) __lowerCAmelCase : Any ={ # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class _lowercase ( A__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = '''megatron-bert''' def __init__( self :int , lowerCAmelCase__ :int=29_056 , lowerCAmelCase__ :Dict=1_024 , lowerCAmelCase__ :Optional[int]=24 , lowerCAmelCase__ :str=16 , lowerCAmelCase__ :Optional[int]=4_096 , lowerCAmelCase__ :Optional[Any]="gelu" , lowerCAmelCase__ :List[str]=0.1 , lowerCAmelCase__ :str=0.1 , lowerCAmelCase__ :List[str]=512 , lowerCAmelCase__ :Any=2 , lowerCAmelCase__ :int=0.02 , lowerCAmelCase__ :Tuple=1E-1_2 , lowerCAmelCase__ :Tuple=0 , lowerCAmelCase__ :Optional[int]="absolute" , lowerCAmelCase__ :List[str]=True , **lowerCAmelCase__ :Tuple , ) -> Optional[Any]: super().__init__(pad_token_id=lowerCAmelCase__ , **lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = vocab_size __SCREAMING_SNAKE_CASE : List[str] = hidden_size __SCREAMING_SNAKE_CASE : List[Any] = num_hidden_layers __SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads __SCREAMING_SNAKE_CASE : Tuple = hidden_act __SCREAMING_SNAKE_CASE : Any = intermediate_size __SCREAMING_SNAKE_CASE : List[Any] = hidden_dropout_prob __SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE : Optional[int] = max_position_embeddings __SCREAMING_SNAKE_CASE : List[Any] = type_vocab_size __SCREAMING_SNAKE_CASE : str = initializer_range __SCREAMING_SNAKE_CASE : Dict = layer_norm_eps __SCREAMING_SNAKE_CASE : Dict = position_embedding_type __SCREAMING_SNAKE_CASE : Union[str, Any] = use_cache
696
0
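Worked numbers for the pricing helper above, a plain multiplicative markup: price_plus_tax(100, 0.25) = 100 * 1.25 = 125.0, and price_plus_tax(125.50, 0.05) = 125.50 * 1.05 = 131.775.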
"""simple docstring""" import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class _UpperCAmelCase ( unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Optional[Any] = MODEL_FOR_MASKED_LM_MAPPING SCREAMING_SNAKE_CASE_ : Any = TF_MODEL_FOR_MASKED_LM_MAPPING def A ( self : str ) -> List[Any]: super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def A ( self : List[Any] ) -> Optional[int]: lowercase_ : str = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''tf''' ) lowercase_ : List[str] = unmasker('''My name is <mask>''' ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=6 ) , [ {'''sequence''': '''My name is grouped''', '''score''': 2.1e-05, '''token''': 3_80_15, '''token_str''': ''' grouped'''}, {'''sequence''': '''My name is accuser''', '''score''': 2.1e-05, '''token''': 2_55_06, '''token_str''': ''' accuser'''}, ] , ) lowercase_ : List[Any] = unmasker('''The largest city in France is <mask>''' ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=6 ) , [ { '''sequence''': '''The largest city in France is grouped''', '''score''': 2.1e-05, '''token''': 3_80_15, '''token_str''': ''' grouped''', }, { '''sequence''': '''The largest city in France is accuser''', '''score''': 2.1e-05, '''token''': 2_55_06, '''token_str''': ''' accuser''', }, ] , ) lowercase_ : Tuple = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=6 ) , [ {'''sequence''': '''My name is Clara''', '''score''': 2e-05, '''token''': 1_36_06, '''token_str''': ''' Clara'''}, {'''sequence''': '''My name is Patrick''', '''score''': 2e-05, '''token''': 34_99, '''token_str''': ''' Patrick'''}, {'''sequence''': '''My name is Te''', '''score''': 1.9e-05, '''token''': 29_41, '''token_str''': ''' Te'''}, ] , ) @require_torch def A ( self : Union[str, Any] ) -> List[Any]: lowercase_ : List[Any] = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , top_k=2 , framework='''pt''' ) lowercase_ : Optional[Any] = unmasker('''My name is <mask>''' ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=6 ) , [ {'''sequence''': '''My name is Maul''', '''score''': 2.2e-05, '''token''': 3_56_76, '''token_str''': ''' Maul'''}, {'''sequence''': '''My name isELS''', '''score''': 2.2e-05, '''token''': 1_64_16, '''token_str''': '''ELS'''}, ] , ) lowercase_ : Optional[Any] = unmasker('''The largest city in France is <mask>''' ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=6 ) , [ { '''sequence''': '''The largest city in France is Maul''', '''score''': 2.2e-05, '''token''': 3_56_76, '''token_str''': ''' Maul''', }, {'''sequence''': '''The largest city in France isELS''', '''score''': 2.2e-05, '''token''': 1_64_16, '''token_str''': '''ELS'''}, ] , ) lowercase_ : Tuple = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=6 ) , [ {'''sequence''': '''My name is 
Patrick''', '''score''': 2.1e-05, '''token''': 34_99, '''token_str''': ''' Patrick'''}, {'''sequence''': '''My name is Te''', '''score''': 2e-05, '''token''': 29_41, '''token_str''': ''' Te'''}, {'''sequence''': '''My name is Clara''', '''score''': 2e-05, '''token''': 1_36_06, '''token_str''': ''' Clara'''}, ] , ) lowercase_ : List[Any] = unmasker('''My name is <mask> <mask>''' , top_k=2 ) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=6 ) , [ [ { '''score''': 2.2e-05, '''token''': 3_56_76, '''token_str''': ''' Maul''', '''sequence''': '''<s>My name is Maul<mask></s>''', }, {'''score''': 2.2e-05, '''token''': 1_64_16, '''token_str''': '''ELS''', '''sequence''': '''<s>My name isELS<mask></s>'''}, ], [ { '''score''': 2.2e-05, '''token''': 3_56_76, '''token_str''': ''' Maul''', '''sequence''': '''<s>My name is<mask> Maul</s>''', }, {'''score''': 2.2e-05, '''token''': 1_64_16, '''token_str''': '''ELS''', '''sequence''': '''<s>My name is<mask>ELS</s>'''}, ], ] , ) @require_torch_gpu def A ( self : Any ) -> int: lowercase_ : Tuple = pipeline('''fill-mask''' , model='''hf-internal-testing/tiny-random-distilbert''' , device=0 , framework='''pt''' ) # convert model to fp16 pipe.model.half() lowercase_ : int = pipe('''Paris is the [MASK] of France.''' ) # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) @slow @require_torch def A ( self : List[Any] ) -> Union[str, Any]: lowercase_ : Any = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''pt''' ) self.run_large_test(lowerCAmelCase__ ) @slow @require_tf def A ( self : List[str] ) -> Dict: lowercase_ : List[str] = pipeline(task='''fill-mask''' , model='''distilroberta-base''' , top_k=2 , framework='''tf''' ) self.run_large_test(lowerCAmelCase__ ) def A ( self : Tuple , A : List[str] ) -> Union[str, Any]: lowercase_ : Dict = unmasker('''My name is <mask>''' ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , [ {'''sequence''': '''My name is John''', '''score''': 0.008, '''token''': 6_10, '''token_str''': ''' John'''}, {'''sequence''': '''My name is Chris''', '''score''': 0.007, '''token''': 15_73, '''token_str''': ''' Chris'''}, ] , ) lowercase_ : Optional[int] = unmasker('''The largest city in France is <mask>''' ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , [ { '''sequence''': '''The largest city in France is Paris''', '''score''': 0.251, '''token''': 22_01, '''token_str''': ''' Paris''', }, { '''sequence''': '''The largest city in France is Lyon''', '''score''': 0.214, '''token''': 1_27_90, '''token_str''': ''' Lyon''', }, ] , ) lowercase_ : Optional[Any] = unmasker('''My name is <mask>''' , targets=[''' Patrick''', ''' Clara''', ''' Teven'''] , top_k=3 ) self.assertEqual( nested_simplify(lowerCAmelCase__ ) , [ {'''sequence''': '''My name is Patrick''', '''score''': 0.005, '''token''': 34_99, '''token_str''': ''' Patrick'''}, {'''sequence''': '''My name is Clara''', '''score''': 0.000, '''token''': 1_36_06, '''token_str''': ''' Clara'''}, {'''sequence''': '''My name is Te''', '''score''': 0.000, '''token''': 29_41, '''token_str''': ''' Te'''}, ] , ) @require_torch def A ( self : List[str] ) -> Optional[int]: lowercase_ : Dict = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''pt''' ) lowercase_ : Union[str, Any] = None lowercase_ : Dict = None 
self.run_pipeline_test(lowerCAmelCase__ , [] ) @require_tf def A ( self : Optional[int] ) -> Union[str, Any]: lowercase_ : Tuple = pipeline(task='''fill-mask''' , model='''sshleifer/tiny-distilroberta-base''' , framework='''tf''' ) lowercase_ : str = None lowercase_ : Tuple = None self.run_pipeline_test(lowerCAmelCase__ , [] ) def A ( self : Union[str, Any] , A : Any , A : Dict , A : int ) -> Union[str, Any]: if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest('''The provided tokenizer has no mask token, (probably reformer or wav2vec2)''' ) lowercase_ : Union[str, Any] = FillMaskPipeline(model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ ) lowercase_ : Tuple = [ F'''This is another {tokenizer.mask_token} test''', ] return fill_masker, examples def A ( self : Any , A : Tuple , A : Optional[int] ) -> str: lowercase_ : List[Any] = fill_masker.tokenizer lowercase_ : Union[str, Any] = fill_masker.model lowercase_ : Any = fill_masker( F'''This is a {tokenizer.mask_token}''' , ) self.assertEqual( lowerCAmelCase__ , [ {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, ] , ) lowercase_ : Optional[int] = fill_masker([F'''This is a {tokenizer.mask_token}'''] ) self.assertEqual( lowerCAmelCase__ , [ {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, ] , ) lowercase_ : Optional[Any] = fill_masker([F'''This is a {tokenizer.mask_token}''', F'''Another {tokenizer.mask_token} great test.'''] ) self.assertEqual( lowerCAmelCase__ , [ [ {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ 
)}, {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, ], [ {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, ], ] , ) with self.assertRaises(lowerCAmelCase__ ): fill_masker([None] ) # No mask_token is not supported with self.assertRaises(lowerCAmelCase__ ): fill_masker('''This is''' ) self.run_test_top_k(lowerCAmelCase__ , lowerCAmelCase__ ) self.run_test_targets(lowerCAmelCase__ , lowerCAmelCase__ ) self.run_test_top_k_targets(lowerCAmelCase__ , lowerCAmelCase__ ) self.fill_mask_with_duplicate_targets_and_top_k(lowerCAmelCase__ , lowerCAmelCase__ ) self.fill_mask_with_multiple_masks(lowerCAmelCase__ , lowerCAmelCase__ ) def A ( self : Optional[Any] , A : List[Any] , A : str ) -> Optional[int]: lowercase_ : Union[str, Any] = tokenizer.get_vocab() lowercase_ : List[str] = sorted(vocab.keys() )[:2] # Pipeline argument lowercase_ : List[str] = FillMaskPipeline(model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , targets=lowerCAmelCase__ ) lowercase_ : List[str] = fill_masker(F'''This is a {tokenizer.mask_token}''' ) self.assertEqual( lowerCAmelCase__ , [ {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, ] , ) lowercase_ : Any = {vocab[el] for el in targets} self.assertEqual({el['''token'''] for el in outputs} , lowerCAmelCase__ ) lowercase_ : str = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['''token_str'''] for el in outputs} , set(lowerCAmelCase__ ) ) # Call argument lowercase_ : Tuple = FillMaskPipeline(model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ ) lowercase_ : Optional[int] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=lowerCAmelCase__ ) self.assertEqual( lowerCAmelCase__ , [ {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, ] , ) lowercase_ : List[str] = {vocab[el] for el in targets} self.assertEqual({el['''token'''] for el in outputs} , lowerCAmelCase__ ) lowercase_ : Tuple = [tokenizer.decode([x] ) for x in target_ids] self.assertEqual({el['''token_str'''] for el in outputs} , set(lowerCAmelCase__ ) ) # Score equivalence lowercase_ : Optional[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=lowerCAmelCase__ ) lowercase_ : str = [top_mask['''token_str'''] for top_mask in 
outputs] lowercase_ : Tuple = [top_mask['''score'''] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. if set(lowerCAmelCase__ ) == set(lowerCAmelCase__ ): lowercase_ : List[str] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=lowerCAmelCase__ ) lowercase_ : List[Any] = [top_mask['''score'''] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(lowerCAmelCase__ ) , nested_simplify(lowerCAmelCase__ ) ) # Raises with invalid with self.assertRaises(lowerCAmelCase__ ): lowercase_ : List[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[] ) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(lowerCAmelCase__ ): lowercase_ : Optional[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets=[''''''] ) with self.assertRaises(lowerCAmelCase__ ): lowercase_ : Optional[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , targets='''''' ) def A ( self : str , A : str , A : Union[str, Any] ) -> Dict: lowercase_ : Dict = FillMaskPipeline(model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , top_k=2 ) lowercase_ : Dict = fill_masker(F'''This is a {tokenizer.mask_token}''' ) self.assertEqual( lowerCAmelCase__ , [ {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, ] , ) lowercase_ : Tuple = FillMaskPipeline(model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ ) lowercase_ : List[Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 ) self.assertEqual( lowerCAmelCase__ , [ {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, ] , ) self.assertEqual(nested_simplify(lowerCAmelCase__ ) , nested_simplify(lowerCAmelCase__ ) ) def A ( self : str , A : Union[str, Any] , A : Optional[Any] ) -> int: lowercase_ : List[str] = tokenizer.get_vocab() lowercase_ : Optional[int] = FillMaskPipeline(model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ ) # top_k=2, ntargets=3 lowercase_ : List[str] = sorted(vocab.keys() )[:3] lowercase_ : str = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=lowerCAmelCase__ ) # If we use the most probably targets, and filter differently, we should still # have the same results lowercase_ : List[str] = [el['''token_str'''] for el in sorted(lowerCAmelCase__ , key=lambda A : x["score"] , reverse=lowerCAmelCase__ )] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(lowerCAmelCase__ ).issubset(lowerCAmelCase__ ): lowercase_ : Union[str, Any] = fill_masker(F'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=lowerCAmelCase__ ) # They should yield exactly the same result self.assertEqual(nested_simplify(lowerCAmelCase__ ) , nested_simplify(lowerCAmelCase__ ) ) def A ( self : Dict , A : Tuple , A : Optional[Any] ) -> int: lowercase_ : Any = FillMaskPipeline(model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ ) lowercase_ : int = tokenizer.get_vocab() # String duplicates + id duplicates lowercase_ : Dict = sorted(vocab.keys() )[:3] lowercase_ : Optional[int] = [targets[0], targets[1], targets[0], targets[2], targets[1]] lowercase_ : Tuple = fill_masker(F'''My name is {tokenizer.mask_token}''' , targets=lowerCAmelCase__ , top_k=10 ) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(lowerCAmelCase__ ) , 3 ) def A ( self : Optional[int] , A : List[Any] , A : Union[str, Any] ) -> List[Any]: lowercase_ : List[Any] = FillMaskPipeline(model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ ) lowercase_ : Tuple = fill_masker( F'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 ) self.assertEqual( lowerCAmelCase__ , [ [ {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, ], [ {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, ], [ {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, {'''sequence''': ANY(lowerCAmelCase__ ), '''score''': ANY(lowerCAmelCase__ ), '''token''': ANY(lowerCAmelCase__ ), '''token_str''': ANY(lowerCAmelCase__ )}, ], ] , )
231
import os
import sys
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_dummies  # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init  # noqa: E402


# Align TRANSFORMERS_PATH in check_dummies with the current path
check_dummies.PATH_TO_TRANSFORMERS = os.path.join(git_repo_path, "src", "transformers")

DUMMY_CONSTANT = "\n{0} = None\n"
DUMMY_CLASS = (
    "\nclass {0}(metaclass=DummyObject):\n    _backends = {1}\n\n"
    "    def __init__(self, *args, **kwargs):\n        requires_backends(self, {1})\n"
)
DUMMY_FUNCTION = "\ndef {0}(*args, **kwargs):\n    requires_backends({0}, {1})\n"


class CheckDummiesTester(unittest.TestCase):
    def test_find_backend(self):
        no_backend = find_backend('    _import_structure["models.albert"].append("AlbertTokenizerFast")')
        self.assertIsNone(no_backend)

        simple_backend = find_backend("    if not is_tokenizers_available():")
        self.assertEqual(simple_backend, "tokenizers")

        backend_with_underscore = find_backend("    if not is_tensorflow_text_available():")
        self.assertEqual(backend_with_underscore, "tensorflow_text")

        double_backend = find_backend("    if not (is_sentencepiece_available() and is_tokenizers_available()):")
        self.assertEqual(double_backend, "sentencepiece_and_tokenizers")

        double_backend_with_underscore = find_backend(
            "    if not (is_sentencepiece_available() and is_tensorflow_text_available()):"
        )
        self.assertEqual(double_backend_with_underscore, "sentencepiece_and_tensorflow_text")

        triple_backend = find_backend(
            "    if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):"
        )
        self.assertEqual(triple_backend, "sentencepiece_and_tokenizers_and_vision")

    def test_read_init(self):
        objects = read_init()
        # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
        self.assertIn("torch", objects)
        self.assertIn("tensorflow_text", objects)
        self.assertIn("sentencepiece_and_tokenizers", objects)

        # Likewise, we can't assert on the exact content of a key
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertModel", objects["tf"])
        self.assertIn("FlaxBertModel", objects["flax"])
        self.assertIn("BertModel", objects["torch"])
        self.assertIn("TFBertTokenizer", objects["tensorflow_text"])
        self.assertIn("convert_slow_tokenizer", objects["sentencepiece_and_tokenizers"])

    def test_create_dummy_object(self):
        dummy_constant = create_dummy_object("CONSTANT", "'torch'")
        self.assertEqual(dummy_constant, "\nCONSTANT = None\n")

        dummy_function = create_dummy_object("function", "'torch'")
        self.assertEqual(
            dummy_function, "\ndef function(*args, **kwargs):\n    requires_backends(function, 'torch')\n"
        )

        expected_dummy_class = """
class FakeClass(metaclass=DummyObject):
    _backends = 'torch'

    def __init__(self, *args, **kwargs):
        requires_backends(self, 'torch')
"""
        dummy_class = create_dummy_object("FakeClass", "'torch'")
        self.assertEqual(dummy_class, expected_dummy_class)

    def test_create_dummy_files(self):
        expected_dummy_pytorch_file = """# This file is autogenerated by the command `make fix-copies`, do not edit.
from ..utils import DummyObject, requires_backends


CONSTANT = None


def function(*args, **kwargs):
    requires_backends(function, ["torch"])


class FakeClass(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])
"""
        dummy_files = create_dummy_files({"torch": ["CONSTANT", "function", "FakeClass"]})
        self.assertEqual(dummy_files["torch"], expected_dummy_pytorch_file)
696
0
"""simple docstring""" from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available from ...utils import OptionalDependencyNotAvailable _lowercase : Any = {'configuration_dpt': ['DPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DPTConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : Tuple = ['DPTFeatureExtractor'] _lowercase : List[Any] = ['DPTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : str = [ 'DPT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DPTForDepthEstimation', 'DPTForSemanticSegmentation', 'DPTModel', 'DPTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_dpt import DPTFeatureExtractor from .image_processing_dpt import DPTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dpt import ( DPT_PRETRAINED_MODEL_ARCHIVE_LIST, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel, ) else: import sys _lowercase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
49
import math

from numpy import inf
from scipy.integrate import quad


def gamma(num: float) -> float:
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
696
0
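A quick usage sketch for the quad-based gamma above; the names follow the corrected definitions, and the expected values are standard Gamma-function identities rather than verified program output:

# Usage sketch for gamma()/integrand() above (corrected names assumed).
import math

print(gamma(5))         # ~24.0, since Gamma(n) = (n - 1)!
print(gamma(0.5) ** 2)  # ~3.14159..., since Gamma(1/2) = sqrt(pi)
print(math.isclose(gamma(4.5), math.gamma(4.5), rel_tol=1e-6))  # expected True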
from __future__ import annotations


def resistor_parallel(resistors: list[float]) -> float:
    first_sum = 0.00
    index = 0
    for resistor in resistors:
        if resistor <= 0:
            msg = f"Resistor at index {index} has a negative or zero value!"
            raise ValueError(msg)
        first_sum += 1 / float(resistor)
        index += 1
    return 1 / first_sum


def resistor_series(resistors: list[float]) -> float:
    sum_r = 0.00
    index = 0
    for resistor in resistors:
        sum_r += resistor
        if resistor < 0:
            msg = f"Resistor at index {index} has a negative value!"
            raise ValueError(msg)
        index += 1
    return sum_r


if __name__ == "__main__":
    import doctest

    doctest.testmod()
256
def equated_monthly_installments(
    principal: float, rate_per_annum: float, years_to_repay: int
) -> float:
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")

    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12

    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12

    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
696
0
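For reference, a short usage sketch of the two snippets in this record, assuming the corrected names resistor_parallel/resistor_series and equated_monthly_installments:

# Usage sketch -- corrected function names assumed.
print(resistor_parallel([3.21389, 2, 3]))            # ~0.8737571620498019
print(resistor_series([3.21389, 2, 3]))              # 8.21389
print(equated_monthly_installments(25000, 0.12, 3))  # ~830.36, monthly payment on a 3-year loan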
"""simple docstring""" import time import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers.generation import ( MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria, StoppingCriteriaList, validate_stopping_criteria, ) @require_torch class _lowercase ( unittest.TestCase ): def lowerCAmelCase__ ( self , UpperCamelCase_ ): __magic_name__ = 3 __magic_name__ = 250 __magic_name__ = ids_tensor((batch_size, length) , lowerCAmelCase__ ) __magic_name__ = torch.ones((batch_size, length) , device=lowerCAmelCase__ , dtype=torch.float ) / length return input_ids, scores def lowerCAmelCase__ ( self ): __magic_name__ = self._get_tensors(5 ) __magic_name__ = StoppingCriteriaList( [ MaxLengthCriteria(max_length=10 ), MaxTimeCriteria(max_time=0.1 ), ] ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) __magic_name__ = self._get_tensors(9 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) __magic_name__ = self._get_tensors(10 ) self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) def lowerCAmelCase__ ( self ): __magic_name__ = MaxLengthCriteria(max_length=10 ) __magic_name__ = self._get_tensors(5 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) __magic_name__ = self._get_tensors(9 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) __magic_name__ = self._get_tensors(10 ) self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) def lowerCAmelCase__ ( self ): __magic_name__ = MaxNewTokensCriteria(start_length=5 , max_new_tokens=5 ) __magic_name__ = self._get_tensors(5 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) __magic_name__ = self._get_tensors(9 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) __magic_name__ = self._get_tensors(10 ) self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) __magic_name__ = StoppingCriteriaList([criteria] ) self.assertEqual(criteria_list.max_length , 10 ) def lowerCAmelCase__ ( self ): __magic_name__ = self._get_tensors(5 ) __magic_name__ = MaxTimeCriteria(max_time=0.1 ) self.assertFalse(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) __magic_name__ = MaxTimeCriteria(max_time=0.1 , initial_timestamp=time.time() - 0.2 ) self.assertTrue(criteria(lowerCAmelCase__ , lowerCAmelCase__ ) ) def lowerCAmelCase__ ( self ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 10 ) with self.assertWarns(lowerCAmelCase__ ): validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10 )] ) , 11 ) __magic_name__ = validate_stopping_criteria(StoppingCriteriaList() , 11 ) self.assertEqual(len(lowerCAmelCase__ ) , 1 )
490
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(n: int) -> int:
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
696
0
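And a one-line check of the Möbius implementation above; the expected values are the classical mu(n) sequence, assuming the prime_factors helper returns factors with multiplicity:

# Usage sketch for mobius() above.
print([mobius(n) for n in range(1, 11)])  # expected [1, -1, -1, 0, -1, 1, -1, 0, 0, 1]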
from __future__ import annotations

from collections.abc import Sequence
from typing import Literal


def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append("X")
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i

        if max_n == 0:
            return temp

        temp.append(prime_implicants[rem])

        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1

    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        float(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
494
import json import os import shutil import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoConfig, BertConfig, GPTaConfig from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import TOKEN, USER, is_staging_test sys.path.append(str(Path(__file__).parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 __lowerCAmelCase : int ={ 'return_dict': False, 'output_hidden_states': True, 'output_attentions': True, 'torchscript': True, 'torch_dtype': 'float16', 'use_bfloat16': True, 'tf_legacy_loss': True, 'pruned_heads': {'a': 1}, 'tie_word_embeddings': False, 'is_decoder': True, 'cross_attention_hidden_size': 1_2_8, 'add_cross_attention': True, 'tie_encoder_decoder': True, 'max_length': 5_0, 'min_length': 3, 'do_sample': True, 'early_stopping': True, 'num_beams': 3, 'num_beam_groups': 3, 'diversity_penalty': 0.5, 'temperature': 2.0, 'top_k': 1_0, 'top_p': 0.7, 'typical_p': 0.2, 'repetition_penalty': 0.8, 'length_penalty': 0.8, 'no_repeat_ngram_size': 5, 'encoder_no_repeat_ngram_size': 5, 'bad_words_ids': [1, 2, 3], 'num_return_sequences': 3, 'chunk_size_feed_forward': 5, 'output_scores': True, 'return_dict_in_generate': True, 'forced_bos_token_id': 2, 'forced_eos_token_id': 3, 'remove_invalid_values': True, 'architectures': ['BertModel'], 'finetuning_task': 'translation', 'id2label': {0: 'label'}, 'label2id': {'label': '0'}, 'tokenizer_class': 'BertTokenizerFast', 'prefix': 'prefix', 'bos_token_id': 6, 'pad_token_id': 7, 'eos_token_id': 8, 'sep_token_id': 9, 'decoder_start_token_id': 1_0, 'exponential_decay_length_penalty': (5, 1.0_1), 'suppress_tokens': [0, 1], 'begin_suppress_tokens': 2, 'task_specific_params': {'translation': 'some_params'}, 'problem_type': 'regression', } @is_staging_test class _lowercase ( unittest.TestCase ): '''simple docstring''' @classmethod def __magic_name__( cls :Optional[Any] ) -> Any: __SCREAMING_SNAKE_CASE : str = TOKEN HfFolder.save_token(lowerCAmelCase__ ) @classmethod def __magic_name__( cls :List[str] ) -> List[str]: try: delete_repo(token=cls._token , repo_id='''test-config''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-config-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-config''' ) except HTTPError: pass def __magic_name__( self :Any ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE : int = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) config.push_to_hub('''test-config''' , use_auth_token=self._token ) __SCREAMING_SNAKE_CASE : Tuple = BertConfig.from_pretrained(f'''{USER}/test-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-config''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained(lowerCAmelCase__ , repo_id='''test-config''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token ) __SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) def 
__magic_name__( self :int ) -> Optional[Any]: __SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig( vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 ) config.push_to_hub('''valid_org/test-config-org''' , use_auth_token=self._token ) __SCREAMING_SNAKE_CASE : Any = BertConfig.from_pretrained('''valid_org/test-config-org''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-config-org''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: config.save_pretrained( lowerCAmelCase__ , repo_id='''valid_org/test-config-org''' , push_to_hub=lowerCAmelCase__ , use_auth_token=self._token ) __SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''valid_org/test-config-org''' ) for k, v in config.to_dict().items(): if k != "transformers_version": self.assertEqual(lowerCAmelCase__ , getattr(lowerCAmelCase__ , lowerCAmelCase__ ) ) def __magic_name__( self :Dict ) -> Optional[int]: CustomConfig.register_for_auto_class() __SCREAMING_SNAKE_CASE : Tuple = CustomConfig(attribute=42 ) config.push_to_hub('''test-dynamic-config''' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual(config.auto_map , {'''AutoConfig''': '''custom_configuration.CustomConfig'''} ) __SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''' , trust_remote_code=lowerCAmelCase__ ) # Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module self.assertEqual(new_config.__class__.__name__ , '''CustomConfig''' ) self.assertEqual(new_config.attribute , 42 ) class _lowercase ( unittest.TestCase ): '''simple docstring''' def __magic_name__( self :List[str] ) -> Dict: __SCREAMING_SNAKE_CASE : Optional[Any] = GPTaConfig() # attempt to modify each of int/float/bool/str config records and verify they were updated __SCREAMING_SNAKE_CASE : Optional[Any] = c.n_embd + 1 # int __SCREAMING_SNAKE_CASE : Optional[Any] = c.resid_pdrop + 1.0 # float __SCREAMING_SNAKE_CASE : Dict = not c.scale_attn_weights # bool __SCREAMING_SNAKE_CASE : Optional[int] = c.summary_type + '''foo''' # str c.update_from_string( f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' ) self.assertEqual(lowerCAmelCase__ , c.n_embd , '''mismatch for key: n_embd''' ) self.assertEqual(lowerCAmelCase__ , c.resid_pdrop , '''mismatch for key: resid_pdrop''' ) self.assertEqual(lowerCAmelCase__ , c.scale_attn_weights , '''mismatch for key: scale_attn_weights''' ) self.assertEqual(lowerCAmelCase__ , c.summary_type , '''mismatch for key: summary_type''' ) def __magic_name__( self :Dict ) -> str: __SCREAMING_SNAKE_CASE : Dict = PretrainedConfig() __SCREAMING_SNAKE_CASE : str = [key for key in base_config.__dict__ if key not in config_common_kwargs] # If this part of the test fails, you have arguments to addin config_common_kwargs above. 
self.assertListEqual( lowerCAmelCase__ , ['''is_encoder_decoder''', '''_name_or_path''', '''_commit_hash''', '''transformers_version'''] ) __SCREAMING_SNAKE_CASE : List[Any] = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCAmelCase__ , lowerCAmelCase__ )] if len(lowerCAmelCase__ ) > 0: raise ValueError( '''The following keys are set with the default values in''' ''' `test_configuration_common.config_common_kwargs` pick another value for them:''' f''' {', '.join(lowerCAmelCase__ )}.''' ) def __magic_name__( self :Union[str, Any] ) -> List[Any]: with self.assertRaises(lowerCAmelCase__ ): # config is in subfolder, the following should not work without specifying the subfolder __SCREAMING_SNAKE_CASE : List[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' ) __SCREAMING_SNAKE_CASE : List[str] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert-subfolder''' , subfolder='''bert''' ) self.assertIsNotNone(lowerCAmelCase__ ) def __magic_name__( self :List[Any] ) -> Optional[Any]: # A mock response for an HTTP head request to emulate server down __SCREAMING_SNAKE_CASE : Union[str, Any] = mock.Mock() __SCREAMING_SNAKE_CASE : List[Any] = 500 __SCREAMING_SNAKE_CASE : Union[str, Any] = {} __SCREAMING_SNAKE_CASE : Optional[Any] = HTTPError __SCREAMING_SNAKE_CASE : str = {} # Download this model to make sure it's in the cache. __SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=lowerCAmelCase__ ) as mock_head: __SCREAMING_SNAKE_CASE : Optional[Any] = BertConfig.from_pretrained('''hf-internal-testing/tiny-random-bert''' ) # This check we did call the fake head request mock_head.assert_called() def __magic_name__( self :Union[str, Any] ) -> List[Any]: # This test is for deprecated behavior and can be removed in v5 __SCREAMING_SNAKE_CASE : Optional[int] = BertConfig.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json''' ) def __magic_name__( self :str ) -> List[str]: __SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained('''bert-base-cased''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = ['''config.4.0.0.json'''] with tempfile.TemporaryDirectory() as tmp_dir: configuration.save_pretrained(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[Any] = 2 json.dump(configuration.to_dict() , open(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , '''w''' ) ) # This should pick the new configuration file as the version of Transformers is > 4.0.0 __SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertEqual(new_configuration.hidden_size , 2 ) # Will need to be adjusted if we reach v42 and this test is still here. 
# Should pick the old configuration file as the version of Transformers is < 4.42.0 __SCREAMING_SNAKE_CASE : List[Any] = ['''config.42.0.0.json'''] __SCREAMING_SNAKE_CASE : Tuple = 768 configuration.save_pretrained(lowerCAmelCase__ ) shutil.move(os.path.join(lowerCAmelCase__ , '''config.4.0.0.json''' ) , os.path.join(lowerCAmelCase__ , '''config.42.0.0.json''' ) ) __SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertEqual(new_configuration.hidden_size , 768 ) def __magic_name__( self :List[str] ) -> Union[str, Any]: # This repo has two configuration files, one for v4.0.0 and above with a different hidden size. __SCREAMING_SNAKE_CASE : Union[str, Any] = '''hf-internal-testing/test-two-configs''' import transformers as new_transformers __SCREAMING_SNAKE_CASE : int = '''v4.0.0''' __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : List[Any] = new_transformers.models.auto.AutoConfig.from_pretrained( lowerCAmelCase__ , return_unused_kwargs=lowerCAmelCase__ ) self.assertEqual(new_configuration.hidden_size , 2 ) # This checks `_configuration_file` ia not kept in the kwargs by mistake. self.assertDictEqual(lowerCAmelCase__ , {} ) # Testing an older version by monkey-patching the version in the module it's used. import transformers as old_transformers __SCREAMING_SNAKE_CASE : List[str] = '''v3.0.0''' __SCREAMING_SNAKE_CASE : Any = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCAmelCase__ ) self.assertEqual(old_configuration.hidden_size , 768 )
696
0
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import torch from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available @dataclass class SCREAMING_SNAKE_CASE__ ( A__ ): __lowerCAmelCase : Union[List[np.ndarray], torch.FloatTensor] try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipeline_text_to_video_synth import TextToVideoSDPipeline from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401 from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
160
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]


if TYPE_CHECKING:
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
696
0
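Both `__init__.py` files in this record rely on the same lazy-import pattern: `_import_structure` maps submodule names to their exported symbols, and `_LazyModule` defers the real imports until a symbol is first accessed. A minimal illustration of the pattern's intent (not a verified test output):

# Sketch: importing the package is cheap; heavy submodules load lazily.
import transformers.models.llama as llama  # does not import torch yet

config_cls = llama.LlamaConfig  # first attribute access triggers the real submodule import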
def remove_digit(num: int) -> int:
    """
    Returns the largest number that can be formed by deleting exactly
    one digit of the given integer.
    """
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    else:
        num_str = str(abs(num))
        num_transpositions = [list(num_str) for _ in range(len(num_str))]
        for index in range(len(num_str)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(list(transposition))) for transposition in num_transpositions
        )


if __name__ == "__main__":
    __import__("doctest").testmod()
79
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}


class SwitchTransformersConfig(PretrainedConfig):
    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
696
0
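A usage sketch for remove_digit() above; the digit whose removal maximizes the remaining number is dropped, and the sign is discarded via abs():

# Usage sketch for remove_digit() above.
print(remove_digit(152))   # 52 -- dropping the '1' beats dropping '5' or '2'
print(remove_digit(-290))  # 90 -- abs() is taken first, then the '2' is removed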
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set="train"):
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )


def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))


if __name__ == "__main__":
    run_gradient_descent()
    print("\nTesting gradient descent for a linear hypothesis function.\n")
    test_gradient_descent()
504
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
696
0
'''simple docstring''' import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available from transformers.models.gpta.tokenization_gpta import GPTaTokenizer from transformers.testing_utils import require_keras_nlp, require_tf, slow if is_tf_available(): import tensorflow as tf if is_keras_nlp_available(): from transformers.models.gpta import TFGPTaTokenizer lowerCamelCase : List[str] = ['gpt2'] lowerCamelCase : int = 'gpt2' if is_tf_available(): class A__ ( tf.Module ): def __init__( self : List[str] , _a : Any ) -> Tuple: '''simple docstring''' super().__init__() _SCREAMING_SNAKE_CASE =tokenizer _SCREAMING_SNAKE_CASE =AutoConfig.from_pretrained(lowerCAmelCase__ ) _SCREAMING_SNAKE_CASE =TFGPTaLMHeadModel.from_config(lowerCAmelCase__ ) @tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='text' ),) ) def A ( self : int , _a : List[str] ) -> List[str]: '''simple docstring''' _SCREAMING_SNAKE_CASE =self.tokenizer(lowerCAmelCase__ ) _SCREAMING_SNAKE_CASE =tokenized['''input_ids'''].to_tensor() _SCREAMING_SNAKE_CASE =tf.cast(input_ids_dense > 0 , tf.intaa ) # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN]) _SCREAMING_SNAKE_CASE =self.model(input_ids=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ )['''logits'''] return outputs @require_tf @require_keras_nlp class A__ ( unittest.TestCase ): def A ( self : Union[str, Any] ) -> Optional[int]: '''simple docstring''' super().setUp() _SCREAMING_SNAKE_CASE =[GPTaTokenizer.from_pretrained(lowerCAmelCase__ ) for checkpoint in (TOKENIZER_CHECKPOINTS)] _SCREAMING_SNAKE_CASE =[TFGPTaTokenizer.from_pretrained(lowerCAmelCase__ ) for checkpoint in TOKENIZER_CHECKPOINTS] assert len(self.tokenizers ) == len(self.tf_tokenizers ) _SCREAMING_SNAKE_CASE =[ '''This is a straightforward English test sentence.''', '''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''', '''Now we\'re going to add some Chinese: 一 二 三 一二三''', '''And some much more rare Chinese: 齉 堃 齉堃''', '''Je vais aussi écrire en français pour tester les accents''', '''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''', ] _SCREAMING_SNAKE_CASE =list(zip(self.test_sentences , self.test_sentences[::-1] ) ) def A ( self : Optional[int] ) -> Union[str, Any]: '''simple docstring''' for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ): for test_inputs in self.test_sentences: _SCREAMING_SNAKE_CASE =tokenizer([test_inputs] , return_tensors='tf' ) _SCREAMING_SNAKE_CASE =tf_tokenizer([test_inputs] ) for key in python_outputs.keys(): # convert them to numpy to avoid messing with ragged tensors _SCREAMING_SNAKE_CASE =python_outputs[key].numpy() _SCREAMING_SNAKE_CASE =tf_outputs[key].numpy() self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) ) self.assertTrue(tf.reduce_all(tf.cast(lowerCAmelCase__ , tf.intaa ) == tf_outputs_values ) ) @slow def A ( self : Dict ) -> Optional[int]: '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: _SCREAMING_SNAKE_CASE =tf.function(lowerCAmelCase__ ) for test_inputs in self.test_sentences: _SCREAMING_SNAKE_CASE =tf.constant(lowerCAmelCase__ ) _SCREAMING_SNAKE_CASE =compiled_tokenizer(lowerCAmelCase__ ) _SCREAMING_SNAKE_CASE =tf_tokenizer(lowerCAmelCase__ ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def A ( self : List[str] ) -> List[str]: '''simple 
docstring''' for tf_tokenizer in self.tf_tokenizers: _SCREAMING_SNAKE_CASE =ModelToSave(tokenizer=lowerCAmelCase__ ) _SCREAMING_SNAKE_CASE =tf.convert_to_tensor([self.test_sentences[0]] ) _SCREAMING_SNAKE_CASE =model.serving(lowerCAmelCase__ ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: _SCREAMING_SNAKE_CASE =Path(lowerCAmelCase__ ) / '''saved.model''' tf.saved_model.save(lowerCAmelCase__ , lowerCAmelCase__ , signatures={'serving_default': model.serving} ) _SCREAMING_SNAKE_CASE =tf.saved_model.load(lowerCAmelCase__ ) _SCREAMING_SNAKE_CASE =loaded_model.signatures['''serving_default'''](lowerCAmelCase__ )['''output_0'''] # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertTrue(tf.reduce_all(out == loaded_output ) ) @slow def A ( self : List[str] ) -> Optional[Any]: '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: _SCREAMING_SNAKE_CASE =tf.convert_to_tensor([self.test_sentences[0]] ) _SCREAMING_SNAKE_CASE =tf_tokenizer(lowerCAmelCase__ ) # Build model with some sample inputs _SCREAMING_SNAKE_CASE =tf_tokenizer.get_config() _SCREAMING_SNAKE_CASE =TFGPTaTokenizer.from_config(lowerCAmelCase__ ) _SCREAMING_SNAKE_CASE =model_from_config(lowerCAmelCase__ ) for key in from_config_output.keys(): self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) ) @slow def A ( self : int ) -> str: '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: # for the test to run _SCREAMING_SNAKE_CASE =12_3123 for max_length in [3, 5, 1024]: _SCREAMING_SNAKE_CASE =tf.convert_to_tensor([self.test_sentences[0]] ) _SCREAMING_SNAKE_CASE =tf_tokenizer(lowerCAmelCase__ , max_length=lowerCAmelCase__ ) _SCREAMING_SNAKE_CASE =out['''input_ids'''].numpy().shape[1] assert out_length == max_length
405
from datetime import datetime

import requests


def download_video(url: str) -> bytes:
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


if __name__ == "__main__":
    url = input("Enter Video/IGTV url: ").strip()
    file_name = f"{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"

    with open(file_name, "wb") as fp:
        fp.write(download_video(url))
    print(f"Done. Video saved to disk as {file_name}.")
696
0
import itertools import os import random import tempfile import unittest import numpy as np from datasets import load_dataset from transformers import is_speech_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import WhisperFeatureExtractor if is_torch_available(): import torch A = random.Random() def a(lowercase__ , lowercase__=1.0 , lowercase__=None , lowercase__=None ): '''simple docstring''' if rng is None: snake_case_ = global_rng snake_case_ = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class SCREAMING_SNAKE_CASE ( unittest.TestCase ): """simple docstring""" def __init__( self , __UpperCamelCase , __UpperCamelCase=7 , __UpperCamelCase=4_00 , __UpperCamelCase=20_00 , __UpperCamelCase=10 , __UpperCamelCase=1_60 , __UpperCamelCase=8 , __UpperCamelCase=0.0 , __UpperCamelCase=40_00 , __UpperCamelCase=False , __UpperCamelCase=True , ): """simple docstring""" snake_case_ = parent snake_case_ = batch_size snake_case_ = min_seq_length snake_case_ = max_seq_length snake_case_ = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) snake_case_ = padding_value snake_case_ = sampling_rate snake_case_ = return_attention_mask snake_case_ = do_normalize snake_case_ = feature_size snake_case_ = chunk_length snake_case_ = hop_length def __lowerCAmelCase ( self ): """simple docstring""" return { "feature_size": self.feature_size, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def __lowerCAmelCase ( self , __UpperCamelCase=False , __UpperCamelCase=False ): """simple docstring""" def _flatten(__UpperCamelCase ): return list(itertools.chain(*lowerCAmelCase__ ) ) if equal_length: snake_case_ = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size snake_case_ = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: snake_case_ = [np.asarray(lowerCAmelCase__ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class SCREAMING_SNAKE_CASE ( A__ , unittest.TestCase ): """simple docstring""" __A = WhisperFeatureExtractor if is_speech_available() else None def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = WhisperFeatureExtractionTester(self ) def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ = feat_extract_first.save_pretrained(lowerCAmelCase__ )[0] check_json_file_has_correct_format(lowerCAmelCase__ ) snake_case_ = self.feature_extraction_class.from_pretrained(lowerCAmelCase__ ) snake_case_ = feat_extract_first.to_dict() snake_case_ = feat_extract_second.to_dict() snake_case_ = feat_extract_first.mel_filters snake_case_ = feat_extract_second.mel_filters self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ ) ) self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def 
__lowerCAmelCase ( self ): """simple docstring""" snake_case_ = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ = os.path.join(lowerCAmelCase__ , 'feat_extract.json' ) feat_extract_first.to_json_file(lowerCAmelCase__ ) snake_case_ = self.feature_extraction_class.from_json_file(lowerCAmelCase__ ) snake_case_ = feat_extract_first.to_dict() snake_case_ = feat_extract_second.to_dict() snake_case_ = feat_extract_first.mel_filters snake_case_ = feat_extract_second.mel_filters self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ ) ) self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ ) def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) # create three inputs of length 800, 1000, and 1200 snake_case_ = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] snake_case_ = [np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs] # Test feature size snake_case_ = feature_extractor(lowerCAmelCase__ , padding='max_length' , return_tensors='np' ).input_features self.assertTrue(input_features.ndim == 3 ) self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames ) self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size ) # Test not batched input snake_case_ = feature_extractor(speech_inputs[0] , return_tensors='np' ).input_features snake_case_ = feature_extractor(np_speech_inputs[0] , return_tensors='np' ).input_features self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) ) # Test batched snake_case_ = feature_extractor(lowerCAmelCase__ , return_tensors='np' ).input_features snake_case_ = feature_extractor(lowerCAmelCase__ , return_tensors='np' ).input_features for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ): self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) ) # Test 2-D numpy arrays are batched. 
snake_case_ = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] snake_case_ = np.asarray(lowerCAmelCase__ ) snake_case_ = feature_extractor(lowerCAmelCase__ , return_tensors='np' ).input_features snake_case_ = feature_extractor(lowerCAmelCase__ , return_tensors='np' ).input_features for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ): self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) ) # Test truncation required snake_case_ = [floats_list((1, x) )[0] for x in range(2_00 , (feature_extractor.n_samples + 5_00) , 2_00 )] snake_case_ = [np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs] snake_case_ = [x[: feature_extractor.n_samples] for x in speech_inputs] snake_case_ = [np.asarray(lowerCAmelCase__ ) for speech_input in speech_inputs_truncated] snake_case_ = feature_extractor(lowerCAmelCase__ , return_tensors='np' ).input_features snake_case_ = feature_extractor(lowerCAmelCase__ , return_tensors='np' ).input_features for enc_seq_a, enc_seq_a in zip(lowerCAmelCase__ , lowerCAmelCase__ ): self.assertTrue(np.allclose(lowerCAmelCase__ , lowerCAmelCase__ , atol=1E-3 ) ) def __lowerCAmelCase ( self ): """simple docstring""" import torch snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) snake_case_ = np.random.rand(1_00 , 32 ).astype(np.floataa ) snake_case_ = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: snake_case_ = feature_extractor.pad([{'input_features': inputs}] , return_tensors='np' ) self.assertTrue(np_processed.input_features.dtype == np.floataa ) snake_case_ = feature_extractor.pad([{'input_features': inputs}] , return_tensors='pt' ) self.assertTrue(pt_processed.input_features.dtype == torch.floataa ) def __lowerCAmelCase ( self , __UpperCamelCase ): """simple docstring""" snake_case_ = load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' ) # automatic decoding with librispeech snake_case_ = ds.sort('id' ).select(range(lowerCAmelCase__ ) )[:num_samples]['''audio'''] return [x["array"] for x in speech_samples] def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = torch.tensor( [ 0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951, 0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678, 0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554, -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854 ] ) # fmt: on snake_case_ = self._load_datasamples(1 ) snake_case_ = WhisperFeatureExtractor() snake_case_ = feature_extractor(lowerCAmelCase__ , return_tensors='pt' ).input_features self.assertEqual(input_features.shape , (1, 80, 30_00) ) self.assertTrue(torch.allclose(input_features[0, 0, :30] , lowerCAmelCase__ , atol=1E-4 ) ) def __lowerCAmelCase ( self ): """simple docstring""" snake_case_ = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() ) snake_case_ = self._load_datasamples(1 )[0] snake_case_ = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_55_35 # Rescale to [0, 65535] to show issue snake_case_ = feat_extract.zero_mean_unit_var_norm([audio] , attention_mask=lowerCAmelCase__ )[0] self.assertTrue(np.all(np.mean(lowerCAmelCase__ ) < 1E-3 ) ) self.assertTrue(np.all(np.abs(np.var(lowerCAmelCase__ ) - 1 ) < 1E-3 ) )
187
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class _lowercase ( A__ , A__ , A__ , unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = StableDiffusionInstructPixaPixPipeline SCREAMING_SNAKE_CASE__ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''} SCREAMING_SNAKE_CASE__ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS SCREAMING_SNAKE_CASE__ : Any = IMAGE_TO_IMAGE_IMAGE_PARAMS SCREAMING_SNAKE_CASE__ : Optional[int] = IMAGE_TO_IMAGE_IMAGE_PARAMS def __magic_name__( self :int ) -> Optional[int]: torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : Any = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) __SCREAMING_SNAKE_CASE : str = PNDMScheduler(skip_prk_steps=lowerCAmelCase__ ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : Any = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) __SCREAMING_SNAKE_CASE : Dict = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) __SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPTextModel(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Dict = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) __SCREAMING_SNAKE_CASE : Union[str, Any] = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __magic_name__( self :Tuple , lowerCAmelCase__ :Tuple , lowerCAmelCase__ :List[Any]=0 ) -> Optional[int]: __SCREAMING_SNAKE_CASE : Any = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase__ ) ).to(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0] __SCREAMING_SNAKE_CASE : List[Any] = Image.fromarray(np.uinta(lowerCAmelCase__ ) ).convert('''RGB''' ) if str(lowerCAmelCase__ ).startswith('''mps''' ): __SCREAMING_SNAKE_CASE : Optional[Any] = torch.manual_seed(lowerCAmelCase__ ) else: __SCREAMING_SNAKE_CASE : Optional[int] = torch.Generator(device=lowerCAmelCase__ ).manual_seed(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, 
'''generator''': generator, '''num_inference_steps''': 2, '''guidance_scale''': 6.0, '''image_guidance_scale''': 1, '''output_type''': '''numpy''', } return inputs def __magic_name__( self :Union[str, Any] ) -> str: __SCREAMING_SNAKE_CASE : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE : Any = self.get_dummy_components() __SCREAMING_SNAKE_CASE : List[Any] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_inputs(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = sd_pipe(**lowerCAmelCase__ ).images __SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE : int = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __magic_name__( self :Tuple ) -> Optional[int]: __SCREAMING_SNAKE_CASE : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_components() __SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[Any] = sd_pipe.to(lowerCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : List[str] = '''french fries''' __SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe(**lowerCAmelCase__ , negative_prompt=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = output.images __SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE : Union[str, Any] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __magic_name__( self :Dict ) -> Dict: __SCREAMING_SNAKE_CASE : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE : List[Any] = self.get_dummy_components() __SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[Any] = sd_pipe.to(lowerCAmelCase__ ) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Dict = [inputs['''prompt''']] * 2 __SCREAMING_SNAKE_CASE : Union[str, Any] = np.array(inputs['''image'''] ).astype(np.floataa ) / 255.0 __SCREAMING_SNAKE_CASE : int = torch.from_numpy(lowerCAmelCase__ ).unsqueeze(0 ).to(lowerCAmelCase__ ) __SCREAMING_SNAKE_CASE : Union[str, Any] = image / 2 + 0.5 __SCREAMING_SNAKE_CASE : Optional[Any] = image.permute(0 , 3 , 1 , 2 ) __SCREAMING_SNAKE_CASE : Any = image.repeat(2 , 1 , 1 , 1 ) __SCREAMING_SNAKE_CASE : List[Any] = sd_pipe(**lowerCAmelCase__ ).images __SCREAMING_SNAKE_CASE : Dict = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) __SCREAMING_SNAKE_CASE : Tuple = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def __magic_name__( self :Union[str, Any] ) -> Dict: __SCREAMING_SNAKE_CASE : Tuple = '''cpu''' # ensure determinism for the device-dependent 
        torch.Generator
        __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
        __SCREAMING_SNAKE_CASE : Union[str, Any] = EulerAncestralDiscreteScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='''scaled_linear''' )
        __SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
        __SCREAMING_SNAKE_CASE : Tuple = sd_pipe.to(lowerCAmelCase__ )
        sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__ )

        __SCREAMING_SNAKE_CASE : Optional[int] = self.get_dummy_inputs(lowerCAmelCase__ )
        __SCREAMING_SNAKE_CASE : Dict = sd_pipe(**lowerCAmelCase__ ).images
        __SCREAMING_SNAKE_CASE : str = image[0, -3:, -3:, -1]

        __SCREAMING_SNAKE_CASE : List[str] = [round(lowerCAmelCase__ , 4 ) for x in image_slice.flatten().tolist()]
        print(''','''.join([str(lowerCAmelCase__ ) for x in slice] ) )

        assert image.shape == (1, 32, 32, 3)
        __SCREAMING_SNAKE_CASE : List[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def __magic_name__( self :Tuple ) -> Optional[int]:
        super().test_inference_batch_single_identical(expected_max_diff=3E-3 )

    def __magic_name__( self :str ) -> List[Any]:
        __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_dummy_components()
        __SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline(**lowerCAmelCase__ )
        __SCREAMING_SNAKE_CASE : int = VaeImageProcessor(do_resize=lowerCAmelCase__ , do_normalize=lowerCAmelCase__ )
        __SCREAMING_SNAKE_CASE : str = pipe.to(lowerCAmelCase__ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase__ )

        __SCREAMING_SNAKE_CASE : Tuple = pipe(**self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' ) )[0]

        __SCREAMING_SNAKE_CASE : Union[str, Any] = components['''vae''']
        __SCREAMING_SNAKE_CASE : Tuple = self.get_dummy_inputs_by_type(lowerCAmelCase__ , input_image_type='''pt''' )

        for image_param in self.image_latents_params:
            if image_param in inputs.keys():
                __SCREAMING_SNAKE_CASE : Optional[int] = vae.encode(inputs[image_param] ).latent_dist.mode()

        __SCREAMING_SNAKE_CASE : Dict = pipe(**lowerCAmelCase__ )[0]

        __SCREAMING_SNAKE_CASE : List[Any] = np.abs(out - out_latents_inputs ).max()
        self.assertLess(lowerCAmelCase__ , 1E-4 , '''passing latents as image input generate different result from passing image''' )


@slow
@require_torch_gpu
class _lowercase ( unittest.TestCase ):
    '''simple docstring'''

    def __magic_name__( self :Union[str, Any] ) -> str:
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def __magic_name__( self :int , lowerCAmelCase__ :Dict=0 ) -> Optional[int]:
        __SCREAMING_SNAKE_CASE : List[Any] = torch.manual_seed(lowerCAmelCase__ )
        __SCREAMING_SNAKE_CASE : int = load_image(
            '''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' )
        __SCREAMING_SNAKE_CASE : Dict = {
            '''prompt''': '''turn him into a cyborg''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''guidance_scale''': 7.5,
            '''image_guidance_scale''': 1.0,
            '''output_type''': '''numpy''',
        }
        return inputs

    def __magic_name__( self :Dict ) -> Optional[int]:
        __SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
        pipe.to(lowerCAmelCase__ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        pipe.enable_attention_slicing()

        __SCREAMING_SNAKE_CASE : Dict = self.get_inputs()
        __SCREAMING_SNAKE_CASE : str = pipe(**lowerCAmelCase__ ).images
        __SCREAMING_SNAKE_CASE : Dict = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        __SCREAMING_SNAKE_CASE : Optional[int] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] )
        assert np.abs(expected_slice - image_slice ).max() < 1E-3

    def __magic_name__( self :Any ) -> Union[str, Any]:
        __SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
        __SCREAMING_SNAKE_CASE : Optional[Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.to(lowerCAmelCase__ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        pipe.enable_attention_slicing()

        __SCREAMING_SNAKE_CASE : Any = self.get_inputs()
        __SCREAMING_SNAKE_CASE : int = pipe(**lowerCAmelCase__ ).images
        __SCREAMING_SNAKE_CASE : int = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        __SCREAMING_SNAKE_CASE : Dict = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] )
        assert np.abs(expected_slice - image_slice ).max() < 1E-3

    def __magic_name__( self :Optional[int] ) -> Optional[int]:
        __SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ )
        __SCREAMING_SNAKE_CASE : Any = DDIMScheduler.from_config(pipe.scheduler.config )
        pipe.to(lowerCAmelCase__ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        pipe.enable_attention_slicing()

        __SCREAMING_SNAKE_CASE : str = self.get_inputs()
        __SCREAMING_SNAKE_CASE : Optional[int] = pipe(**lowerCAmelCase__ ).images
        __SCREAMING_SNAKE_CASE : Union[str, Any] = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        __SCREAMING_SNAKE_CASE : List[Any] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] )
        assert np.abs(expected_slice - image_slice ).max() < 1E-3

    def __magic_name__( self :Dict ) -> Tuple:
        __SCREAMING_SNAKE_CASE : List[Any] = 0

        def callback_fn(lowerCAmelCase__ :int , lowerCAmelCase__ :int , lowerCAmelCase__ :torch.FloatTensor ) -> None:
            __SCREAMING_SNAKE_CASE : Dict = True
            nonlocal number_of_steps
            number_of_steps += 1
            if step == 1:
                __SCREAMING_SNAKE_CASE : Any = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                __SCREAMING_SNAKE_CASE : Tuple = latents[0, -3:, -3:, -1]
                __SCREAMING_SNAKE_CASE : str = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
            elif step == 2:
                __SCREAMING_SNAKE_CASE : Union[str, Any] = latents.detach().cpu().numpy()
                assert latents.shape == (1, 4, 64, 64)
                __SCREAMING_SNAKE_CASE : List[str] = latents[0, -3:, -3:, -1]
                __SCREAMING_SNAKE_CASE : str = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] )
                assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2

        __SCREAMING_SNAKE_CASE : List[str] = False

        __SCREAMING_SNAKE_CASE : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa )
        __SCREAMING_SNAKE_CASE : Union[str, Any] = pipe.to(lowerCAmelCase__ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        pipe.enable_attention_slicing()

        __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs()
        pipe(**lowerCAmelCase__ , callback=lowerCAmelCase__ , callback_steps=1 )
        assert callback_fn.has_been_called
        assert number_of_steps == 3

    def __magic_name__( self :List[str] ) -> Union[str, Any]:
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        __SCREAMING_SNAKE_CASE : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            '''timbrooks/instruct-pix2pix''' , safety_checker=lowerCAmelCase__ , torch_dtype=torch.floataa )
        __SCREAMING_SNAKE_CASE : Optional[int] = pipe.to(lowerCAmelCase__ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        pipe.enable_attention_slicing(1 )
        pipe.enable_sequential_cpu_offload()

        __SCREAMING_SNAKE_CASE : Dict = self.get_inputs()
        __SCREAMING_SNAKE_CASE : List[Any] = pipe(**lowerCAmelCase__ )

        __SCREAMING_SNAKE_CASE : Tuple = torch.cuda.max_memory_allocated()
        # make sure that less than 2.2 GB is allocated
        assert mem_bytes < 2.2 * 10**9

    def __magic_name__( self :int ) -> Tuple:
        __SCREAMING_SNAKE_CASE : Union[str, Any] = self.get_inputs()
        # resize to resolution that is divisible by 8 but not 16 or 32
        __SCREAMING_SNAKE_CASE : int = inputs['''image'''].resize((504, 504) )

        __SCREAMING_SNAKE_CASE : Optional[int] = '''timbrooks/instruct-pix2pix'''
        __SCREAMING_SNAKE_CASE : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
            lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , )
        pipe.to(lowerCAmelCase__ )
        pipe.set_progress_bar_config(disable=lowerCAmelCase__ )
        pipe.enable_attention_slicing()

        __SCREAMING_SNAKE_CASE : Any = pipe(**lowerCAmelCase__ )
        __SCREAMING_SNAKE_CASE : List[str] = output.images[0]

        __SCREAMING_SNAKE_CASE : str = image[255:258, 383:386, -1]

        assert image.shape == (504, 504, 3)
        __SCREAMING_SNAKE_CASE : str = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
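For reference, a minimal usage sketch of the pipeline these tests exercise, written with the public diffusers names rather than the obfuscated ones above. The checkpoint id, image URL, and call arguments are taken from the test inputs; the float16/cuda setup mirrors the slow tests and is otherwise an assumption, not part of this file:

import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

# checkpoint and arguments as used in the slow tests above
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", safety_checker=None, torch_dtype=torch.float16
)
pipe = pipe.to("cuda")
image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
)
# image_guidance_scale controls how strongly the edit stays faithful to the input image
edited = pipe(
    prompt="turn him into a cyborg",
    image=image,
    num_inference_steps=3,
    guidance_scale=7.5,
    image_guidance_scale=1.0,
).images[0]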
696
0
import warnings

from diffusers import StableDiffusionImgaImgPipeline  # noqa F401


warnings.warn(
    'The `image_to_image.py` script is outdated. Please use directly `from diffusers import'
    ' StableDiffusionImg2ImgPipeline` instead.'
)
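The warning above names the supported replacement; a minimal sketch of the suggested import path (the checkpoint id below is a hypothetical placeholder, not taken from this file):

from diffusers import StableDiffusionImg2ImgPipeline

# hypothetical checkpoint id, for illustration only
pipe = StableDiffusionImg2ImgPipeline.from_pretrained("some/img2img-checkpoint")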
121
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion

import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class _lowercase ( A__ ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE__ : torch.FloatTensor
    SCREAMING_SNAKE_CASE__ : Optional[torch.FloatTensor] = None


def _UpperCamelCase ( lowercase__ , lowercase__=0.999 , lowercase__="cosine" , ):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(lowercase__ ):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(lowercase__ ):
            return math.exp(t * -12.0 )

    else:
        raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' )

    __SCREAMING_SNAKE_CASE : List[Any] = []
    for i in range(lowercase__ ):
        __SCREAMING_SNAKE_CASE : Optional[Any] = i / num_diffusion_timesteps
        __SCREAMING_SNAKE_CASE : List[Any] = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(lowercase__ ) / alpha_bar_fn(lowercase__ ) , lowercase__ ) )
    return torch.tensor(lowercase__ , dtype=torch.floataa )


class _lowercase ( A__ , A__ ):
    '''simple docstring'''

    SCREAMING_SNAKE_CASE__ : Any = 1

    @register_to_config
    def __init__( self :Dict , lowerCAmelCase__ :int = 1_000 , lowerCAmelCase__ :float = 0.0001 , lowerCAmelCase__ :float = 0.02 , lowerCAmelCase__ :str = "linear" , lowerCAmelCase__ :Optional[Union[np.ndarray, List[float]]] = None , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :bool = True , lowerCAmelCase__ :int = 0 , lowerCAmelCase__ :str = "epsilon" , lowerCAmelCase__ :float = 1.0 , **lowerCAmelCase__ :int , ) -> Union[str, Any]:
        if kwargs.get('''set_alpha_to_one''' , lowerCAmelCase__ ) is not None:
            __SCREAMING_SNAKE_CASE : Optional[int] = (
                '''The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead.'''
            )
            deprecate('''set_alpha_to_one''' , '''1.0.0''' , lowerCAmelCase__ , standard_warn=lowerCAmelCase__ )
            __SCREAMING_SNAKE_CASE : List[str] = kwargs['''set_alpha_to_one''']

        if trained_betas is not None:
            __SCREAMING_SNAKE_CASE : Any = torch.tensor(lowerCAmelCase__ , dtype=torch.floataa )
        elif beta_schedule == "linear":
            __SCREAMING_SNAKE_CASE : str = torch.linspace(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , dtype=torch.floataa )
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            __SCREAMING_SNAKE_CASE : List[Any] = (
                torch.linspace(beta_start**0.5 , beta_end**0.5 , lowerCAmelCase__ , dtype=torch.floataa ) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            __SCREAMING_SNAKE_CASE : Optional[Any] = betas_for_alpha_bar(lowerCAmelCase__ )
        else:
            raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' )

        __SCREAMING_SNAKE_CASE : Optional[int] = 1.0 - self.betas
        __SCREAMING_SNAKE_CASE : int = torch.cumprod(self.alphas , dim=0 )

        # At every step in inverted ddim, we are looking into the next alphas_cumprod
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # in this case, self.step() just output the predicted noise
        # or whether we use the final alpha of the "non-previous" one.
        __SCREAMING_SNAKE_CASE : int = torch.tensor(0.0 ) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        __SCREAMING_SNAKE_CASE : Any = 1.0

        # setable values
        __SCREAMING_SNAKE_CASE : Optional[Any] = None
        __SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(np.arange(0 , lowerCAmelCase__ ).copy().astype(np.intaa ) )

    def __magic_name__( self :List[str] , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :Optional[int] = None ) -> torch.FloatTensor:
        return sample

    def __magic_name__( self :str , lowerCAmelCase__ :int , lowerCAmelCase__ :Union[str, torch.device] = None ) -> List[str]:
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f'''`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:'''
                f''' {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle'''
                f''' maximal {self.config.num_train_timesteps} timesteps.''' )

        __SCREAMING_SNAKE_CASE : Union[str, Any] = num_inference_steps
        __SCREAMING_SNAKE_CASE : Union[str, Any] = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        __SCREAMING_SNAKE_CASE : Optional[int] = (np.arange(0 , lowerCAmelCase__ ) * step_ratio).round().copy().astype(np.intaa )
        __SCREAMING_SNAKE_CASE : List[Any] = torch.from_numpy(lowerCAmelCase__ ).to(lowerCAmelCase__ )
        self.timesteps += self.config.steps_offset

    def __magic_name__( self :Tuple , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :int , lowerCAmelCase__ :torch.FloatTensor , lowerCAmelCase__ :float = 0.0 , lowerCAmelCase__ :bool = False , lowerCAmelCase__ :Optional[torch.FloatTensor] = None , lowerCAmelCase__ :bool = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        __SCREAMING_SNAKE_CASE : Optional[Any] = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        __SCREAMING_SNAKE_CASE : Any = self.alphas_cumprod[timestep]
        __SCREAMING_SNAKE_CASE : str = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )
        __SCREAMING_SNAKE_CASE : int = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            __SCREAMING_SNAKE_CASE : List[Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            __SCREAMING_SNAKE_CASE : List[Any] = model_output
        elif self.config.prediction_type == "sample":
            __SCREAMING_SNAKE_CASE : List[str] = model_output
            __SCREAMING_SNAKE_CASE : Optional[Any] = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            __SCREAMING_SNAKE_CASE : Dict = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            __SCREAMING_SNAKE_CASE : Tuple = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or'''
                ''' `v_prediction`''' )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            __SCREAMING_SNAKE_CASE : Dict = pred_original_sample.clamp(
                -self.config.clip_sample_range , self.config.clip_sample_range )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        __SCREAMING_SNAKE_CASE : Dict = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        __SCREAMING_SNAKE_CASE : Any = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=lowerCAmelCase__ , pred_original_sample=lowerCAmelCase__ )

    def __len__( self :Optional[int] ) -> List[Any]:
        return self.config.num_train_timesteps
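The step method above runs the DDIM update of formula (12) forward in time, i.e. x_{t+1} = sqrt(alpha_{t+1}) * x0_pred + sqrt(1 - alpha_{t+1}) * eps_pred. A minimal driver loop, assuming this obfuscated class corresponds to the public diffusers DDIMInverseScheduler; the zero tensor is a stand-in for a real UNet noise prediction:

import torch
from diffusers import DDIMInverseScheduler

scheduler = DDIMInverseScheduler(num_train_timesteps=1_000)
scheduler.set_timesteps(num_inference_steps=50)

# latents to invert back toward noise (shape matches the SD latent space)
sample = torch.randn(1, 4, 64, 64)
for t in scheduler.timesteps:
    noise_pred = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(noise_pred, t, sample).prev_sample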
696
0